def merge(file, langs):
    """Merge a translated XML file back into its .helper counterpart.

    Reads the helper copy (which carries IDs/hashes) and the translated
    file from ``work_dir``, merges the translations, writes the merged
    document to ``file``, and writes an ID-bearing copy back to the
    helper path so the next rip/split can flag changed tags.
    """
    input_helper_name = os.path.join(work_dir, os.path.dirname(file),
                                     ".helper", os.path.basename(file))
    input_file_name = os.path.join(work_dir, file)
    helper_document = ET.parse(input_helper_name).getroot()
    translated_document = ET.parse(input_file_name).getroot()
    merge_docs(helper_document, translated_document, langs)
    document = helper_document
    remove_ids(document)
    # NOTE(review): both formatting calls are assumed to sit under this
    # guard -- the original source was collapsed to one line; confirm.
    if "no-format" not in options:  # idiomatic form of `not X in Y`
        create_node_order_helper(document)
        format_xml(document)
    all_document = ET.ElementTree(document)
    all_document.write(file, "utf-8")
    # Write a copy with IDs and hashes, so that when this file is next
    # split, changed tags get marked with "changed".
    create_new_ids(document)
    all_document.write(input_helper_name, "utf-8")
    if "no-delete-rip" not in options:
        os.unlink(input_file_name)
def getHostGuestMapping(self):
    """
    Returns dictionary with host to guest mapping, e.g.:

    { 'host_id_1': ['guest1', 'guest2'],
      'host_id_2': ['guest3', 'guest4'],
    }

    Hosts with no guests map to an empty list.  Guests reporting an
    unknown host id are logged and skipped.
    """
    mapping = {}
    hosts_xml = ElementTree.parse(self.get(self.hosts_url))
    vms_xml = ElementTree.parse(self.get(self.vms_url))
    for host in hosts_xml.findall('host'):
        host_id = host.get('id')  # renamed: `id` shadowed the builtin
        mapping[host_id] = []
    for vm in vms_xml.findall('vm'):
        guest_id = vm.get('id')
        host = vm.find('host')
        if host is None:
            # Guest doesn't have any host
            continue
        host_id = host.get('id')
        if host_id not in mapping:  # direct membership, no .keys() copy
            # Typo fix in the message: "doen't" -> "doesn't".
            self.logger.warning(
                "Guest %s claims that it belongs to host %s which doesn't exist"
                % (guest_id, host_id))
        else:
            mapping[host_id].append(guest_id)
    return mapping
def main(): #define the options usage = "usage: %prog [options]" version = "%prog 0.2.6" options = [] parser = OptionParser(usage=usage, version=version, option_list=options) #parse the options (options, arguments) = parser.parse_args() first = ET.parse(arguments[0]).getroot() firstChildren = [] for child in first: children = {} for x in child: children[x.tag] = x.text firstChildren.append(children) second = ET.parse(arguments[1]).getroot() secondChildren = [] for child in second: children = {} for x in child: children[x.tag] = x.text secondChildren.append(children) differ = False for child in firstChildren: if not child in secondChildren: differ = True break if differ: print "Score files differ"
def merge (file, langs):
    # Merge a translated XML file back into its ".helper" counterpart:
    # the helper copy keeps IDs/hashes, the translated copy carries text.
    input_helper_name = os.path.join (work_dir, os.path.dirname (file), ".helper", os.path.basename (file) )
    input_file_name = os.path.join (work_dir, file)
    helper_document = ET.parse (input_helper_name).getroot()
    translated_document = ET.parse (input_file_name).getroot()
    merge_docs (helper_document, translated_document, langs)
    document = helper_document
    remove_ids (document)
    # Optional pretty-printing unless disabled via the global `options`.
    # NOTE(review): both calls are assumed to sit under this guard -- the
    # original source was collapsed to one line; confirm indentation.
    if not "no-format" in options:
        create_node_order_helper (document)
        format_xml (document)
    all_document = ET.ElementTree (document)
    all_document.write (file, "utf-8")
    #write copy with IDs and hash, so that when this file is
    #next splited, changed tags get marked with "changed"
    create_new_ids (document)
    all_document.write (input_helper_name, "utf-8")
    # Remove the ripped input copy unless disabled via options.
    if not "no-delete-rip" in options:
        os.unlink (input_file_name)
def getHostGuestMapping(self):
    """
    Returns dictionary with host to guest mapping, e.g.:

    { 'host_id_1': ['guest1', 'guest2'],
      'host_id_2': ['guest3', 'guest4'],
    }
    """
    mapping = {}
    # Both documents come back from self.get() as file-like objects.
    hosts_xml = ElementTree.parse(self.get(self.hosts_url))
    vms_xml = ElementTree.parse(self.get(self.vms_url))
    # Every known host starts with an empty guest list.
    for host in hosts_xml.findall('host'):
        id = host.get('id')
        mapping[id] = []
    for vm in vms_xml.findall('vm'):
        guest_id = vm.get('id')
        host = vm.find('host')
        if host is None:
            # Guest don't have any host
            continue
        host_id = host.get('id')
        # A guest pointing at an unknown host is logged and skipped.
        if host_id not in mapping.keys():
            self.logger.warning(
                "Guest %s claims that it belongs to host %s which doen't exist"
                % (guest_id, host_id))
        else:
            mapping[host_id].append(guest_id)
    return mapping
def xmlvalid(self, xml, xsd):
    """Test whether an XML document is valid against an XSD Schema

    Parameters:
    * xml: XML content
    * xsd: pointer to XML Schema (local file path or URL)
    """
    schema_doc = etree.parse(xsd)
    schema = etree.XMLSchema(schema_doc)
    document = etree.parse(StringIO(xml))
    return schema.validate(document)
def main(xldffile, inldffile, outldffile):
    """Apply every XLDF transformation command to an LDF document.

    Commands are read from `xldffile`, applied in order to the tree
    parsed from `inldffile`; the normalised result goes to `outldffile`.
    """
    grammar = {}
    # Directory prefix of the XLDF file ('' when it has no path part),
    # used to resolve relative references inside the commands.
    slash = xldffile.rfind('/')
    localpath = '' if slash < 0 else xldffile[:slash + 1]
    xtree = ET.parse(xldffile)
    ltree = ET.parse(inldffile)
    for cmd in xtree.findall('*'):
        xldf_performCommand(localpath, cmd, ltree)
    normalise(ltree)
    ltree.write(outldffile)
    return
def _run_query(self, query):
    """Execute a SPARQL query and return the traversed result DOM.

    Raises FourStoreException with diagnostic detail when the response
    cannot be parsed or traversed.
    """
    self.setQuery(query)
    response = SPARQLWrapper.query(self).response
    try:
        doc = ElementTree.parse(response)
    except Exception as e:
        if settings.DEBUG:
            # Re-issue the query over plain HTTP to capture a readable,
            # line-numbered copy of the server response for the error.
            readable_response = requests.post(
                settings.FOUR_STORE_URIS['SPARQL'],
                data={'query': query})
            response_text = utils.line_numbered_string(
                readable_response.text)
            raise FourStoreException(
                "Parsing Exception \"%s\"\nQuery: %s\nResponse:\n%s"
                % (e, query, response_text))
        else:
            raise FourStoreException(
                "Parsing Exception \"%s\"\nQuery: %s" % (e, query))
    try:
        dom = TraverseSPARQLResultDOM(doc, asDictionary=True)
    except Exception as e:
        raise FourStoreException("SPARQL DOM Traversal Exception \"%s\"" % e)
    # Bug fix: return the DOM computed above instead of traversing the
    # document a second time outside the try/except guard.
    return dom
def get_episode_by_season_ep(self, show_id, season_num, ep_num): """Get the episode object matching this episode_id.""" url = "%s/series/%s/default/%s/%s" % (self.base_key_url, show_id, season_num, ep_num) data = urllib.urlopen(url) episode = None print url #code to check if data has been returned temp_data = str(data.read()) if temp_data.startswith('<?xml') == False: print 'No data returned ', temp_data return episode data = urllib.urlopen(url) try: tree = ET.parse(data) episode_node = tree.find("Episode") episode = TheTVDB.Episode(episode_node, self.mirror_url) except SyntaxError: pass return episode
def __init__(self, filename):
    """Parse a MediaWiki XML dump and bucket pages by namespace.

    Articles (ns 0), MediaWiki messages (ns 8) and categories (ns 14)
    are stored in separate title->text dicts.
    """
    self.filename = filename
    self.tree = ET.parse(filename)
    self.nr_articles = 0
    self.article_titles = Set()
    self.article_redirects = {}
    self.articles = {}
    self.messages = {}
    self.message_redirects = {}
    self.meta_info = {}
    self.categories = {}
    self.namespaces = self._get_namespaces()
    x_pages = self.tree.findall(mwxml + "page")
    for x_page in x_pages:
        title = x_page.find(mwxml + "title").text
        text = x_page.find(mwxml + "revision").find(mwxml + "text").text
        ns, ns_name = 0, ""
        for ns_text in self.namespaces:
            if title.startswith(ns_text):  # replaces the slice compare
                ns = self.namespaces[ns_text]
                ns_name = ns_text  # bug fix: was `nsname`, a stray local
        # Namespace 0 = articles, 8 = MediaWiki messages, 14 = categories.
        if ns == 0:
            self.articles[title] = text
        elif ns == 8:
            self.messages[title] = text
        elif ns == 14:
            self.categories[title] = text
def parse(self): """ parses the associated cbxml-file. """ tree = ET.parse(self._file) root = tree.getroot() for child in root._children: childType = child.attrib.get('type') childType = str(childType).strip().lower() if childType == "asset": self._parseAsset(child) elif childType == "snippet": self._parseSnippet(child) elif childType == "tag": self._parseTag(child) elif childType == "list": self._parseList(child) elif childType == "folder": self._parseFolder(child) elif childType == "search": pass elif childType == "none": pass else: print "Oh. A new type: ", childType
def printUserCount(prj): url = "http://www.ohloh.net/projects.xml?query=%s&%s" % (prj, params) print url f = urllib.urlopen(url) # Parse the response into a structured XML object tree = ET.parse(f) # Did Ohloh return an error? elem = tree.getroot() error = elem.find("error") if error != None: print 'Ohloh returned:', ET.tostring(error), sys.exit() # Output all the immediate child properties of the first project firstPrj = elem.find("result/project") print '{' if firstPrj != None: for node in firstPrj: if node.tag in [ 'name', 'url', 'homepage_url', 'user_count', 'average_rating', 'rating_count', 'review_count' ]: print "\t%s:\t%s," % (node.tag, node.text) print '},'
def config_exists(self, name):
    """Return True when a <config> element named `name` exists in
    Configuration/config.xml, else False."""
    doc = ET.parse("Configuration/config.xml").getroot()
    return any(entry.get('name') == name
               for entry in doc.findall('config'))
def loadConfig( filename ):
    """Load the XML config file into module globals and populate the
    term DICTIONARY from every *.txt file in the configured directory.

    Returns a progress/log string.
    """
    global CONFIGTREE
    global HIDELIB
    global EMORYCRFLIB
    global CRFMODELDIR
    global MAXMEM
    global DICTIONARY
    CONFIGTREE = ElementTree.parse(filename)
    croot = CONFIGTREE.getroot()
    HIDELIB = croot.find('hidelib').text
    EMORYCRFLIB = croot.find('crflib').text
    CRFMODELDIR = croot.find('crfmodeldir').text
    MAXMEM = croot.find('maxmem').text
    # NOTE(review): DICTIONARYDIR is not in the `global` list above, so
    # it stays local -- confirm nothing else expects it as a global.
    DICTIONARYDIR = croot.find('dictionary').text
    DICTIONARY = dict()
    output = ""
    output += "reading dictionary from " + DICTIONARYDIR
    for fileName in os.listdir(DICTIONARYDIR):
        m = re.match('(.*).txt$', fileName)
        if m:
            dictname = m.group(1)
            # open file and populate appropriate dictionary
            DICTIONARY[dictname] = dict()
            # `with` + line iteration replaces the unclosed handle and
            # the manual `while 1: readline()` loop.
            with open(DICTIONARYDIR + "/" + fileName, 'r') as f:
                for line in f:
                    term = line.rstrip().lower()
                    DICTIONARY[dictname][term] = 1
    return output
def appendXbgf(tree, xbgfFile):
    """Append all top-level elements of `xbgfFile` to `tree`.

    When no tree is given, a fresh xbgf:sequence root is created.
    Returns the (possibly new) tree element.
    """
    if tree is None:
        # Bug fix: `if not tree` also triggered for an existing Element
        # with no children (childless Elements are falsy in ElementTree),
        # silently discarding the caller's tree.
        tree = ET.Element(slpsns.xbgf_('sequence'))
    xtree = ET.parse(xbgfFile)
    for t in xtree.findall('*'):
        tree.append(t)
    return tree
def getStatus(app_name, stream_name):
    """Nagios-style check of a Wowza stream's status.

    Exits 0 with session info when `stream_name` is found under
    `app_name`, exits 2 (CRITICAL) otherwise.
    """
    # Retrieve and write the xml file
    getPage(url)
    # Define the xmltree
    xmltree = ET.parse(xmlpath)
    root = xmltree.getroot()
    # Define data values
    vhost = root.find("VHost")
    # NOTE(review): the nesting below relies on the exact Wowza XML
    # layout; the index positions (getchildren()[0] = name,
    # getchildren()[6] = total sessions) are assumed from that layout --
    # confirm against the server schema.  The original source was
    # collapsed to one line, so the else-attachment below is a best
    # reconstruction.
    for app in vhost:
        for apps in app:
            if app.getchildren()[0].text == app_name:
                for inst in apps:
                    if len(apps.getchildren()) < 9:
                        print "CRITICAL: %s stream not found in server" % (
                            stream_name)
                        sys.exit(2)
                    elif len(inst.getchildren()) > 0 and inst.getchildren()[0].text == stream_name:
                        print "Status: OK, %s Total Sessions: %s" % (
                            stream_name, inst.getchildren()[6].text)
                        sys.exit(0)
            else:
                print "CRITICAL: Application not found in the server"
                sys.exit(2)
    print "CRITICAL: Stream not found in the server"
    sys.exit(2)
def run(file, debug=True):
    """Parse the XML file, create the environment, and
    ....leaving the running up to the libraries!"""
    # we use the first child cause we don't want to deal with the
    # document tag
    # initiate the base document
    doc = node()
    # insert the application node into scope
    basetags.__dict__['application'] = doc
    # get the dom
    dom = et.parse(file).getroot()
    # assemble the child nodes; iterating the element directly replaces
    # the deprecated getchildren() call.
    children = []
    for child in dom:
        new_child = doc.create(child)
        if new_child is not None:
            children.append(new_child)
    doc.child_nodes = children
    call_handlers(doc)
def write(any_list, mode): try: import elementtree.ElementTree as et except: print "Please Run dep_install first" # XML path variable path = get_xml_path() # Code Retriving try: tree = et.parse(path) root = tree.getroot() except: print ".data.xml not found, exiting" exit() # Writing it to xml again according to mode if mode == 1: # Write it in <apps></apps> for apps in root: if apps.tag == 'apps': for i in range( len(any_list) ): # I Am assuming that length of any_list and <gen></gen> is same = 5 apps[i].text = any_list[i] else: # Write it in <files></files> for files in root: if files.tag == 'files': for i in range(len(any_list)): files[i].text = any_list[i] tree.write(path)
def convert_files3(files=None):
    """Print the attribute dict of every row in each posthistory XML file.

    `files` defaults to the original hard-coded path so existing callers
    are unaffected; pass a list of paths to process other dumps.
    """
    if files is None:
        files = ["/lfs/local/0/mraison/a4_posthistory.xml"]
    for fname in files:
        tree = ET.parse(fname)
        rows = tree.getroot()
        for row in rows:
            print(row.attrib)
def getData(earth=False): filename = 'co12_d00_shp' + ['-76', ''][earth] + '/co12_d00.gpx' print 'Reading %s...' % filename xmlRoot = ET.parse(filename) counties = {} for xmlCounty in xmlRoot.getiterator('rte'): name = xmlCounty.findtext('name').strip() number = xmlCounty.findtext('number').strip() #print '%s County' % name ## Correct error in census data for Wentworth's Location #if( name == "Wentworth" and number == '9' ): # name = "Wentworth's Location" if name not in counties: county = {'name': name, 'shapes': [], 'largest': [0, 0]} counties[name] = county points = [] def attr(pt, key): return float(pt.attrib[key].strip()) for xmlPoint in xmlCounty.getiterator('rtept'): points.append([attr(xmlPoint, 'lat'), attr(xmlPoint, 'lon')]) county = counties[name] county['shapes'].append(polyInfo(points)) return {'state': {}, 'counties': counties}
def getData( earth=False ):
    # Read county route data from the census co12 GPX file into
    # {'state': {}, 'counties': {name: {...}}}; `earth` selects which
    # shapefile-derived GPX variant is read.
    filename = 'co12_d00_shp' + ['-76',''][earth] + '/co12_d00.gpx'
    print 'Reading %s...' % filename
    xmlRoot = ET.parse( filename )
    counties = {}
    for xmlCounty in xmlRoot.getiterator('rte'):
        name = xmlCounty.findtext('name').strip()
        number = xmlCounty.findtext('number').strip()
        #print '%s County' % name
        ## Correct error in census data for Wentworth's Location
        #if( name == "Wentworth" and number == '9' ):
        #    name = "Wentworth's Location"
        # First sighting of a county creates its record.
        if name not in counties:
            county = { 'name': name, 'shapes': [], 'largest': [ 0, 0 ] }
            counties[name] = county
        points = []
        # Local accessor: parse one float attribute from a route point.
        def attr( pt, key ):
            return float( pt.attrib[key].strip() )
        for xmlPoint in xmlCounty.getiterator('rtept'):
            points.append( [ attr(xmlPoint,'lat'), attr(xmlPoint,'lon') ] )
        county = counties[name]
        county['shapes'].append( polyInfo( points ) )
    return { 'state':{}, 'counties':counties }
def load_sps_infos(self): """ Load sps infos """ # Put header infos in dict sp_ids = [] sp_info_files = glob.glob("%s/*singlepulse.infos"%os.path.join(dbinfo_dir, self.obs_id)) for ii,sp_info_file in enumerate(sp_info_files): infos = ET.parse(sp_info_file) header = {} header['header_id'] = "%d"%self.header_id header['sp_num'] = "%d"%(ii+1) header['version_id'] = "1" for element in infos.getiterator(): header[element.tag] = element.text key = ['header_id', 'sp_num', 'sp_plot_type_id', 'ps_filename', 'png_filename' ] cmd = 'INSERT IGNORE INTO sp_plots (%s) VALUES (%s);' % (','.join(key), ','.join([header[k] for k in key])) #print cmd self.DBcursor.execute(cmd) cmd = 'SELECT LAST_INSERT_ID() FROM sp_plots;' self.DBcursor.execute(cmd) sp_ids.append(self.DBcursor.fetchone()[0]) if sp_info_files: return sp_ids[0], sp_ids[-1] else: return 0,0
def addFont(self, fontname, filename, size, style="", aspect=""):
    """Register a new <font> entry in every skin Font.xml.

    Appends name/filename/size (and optional style/aspect) sub-elements
    to each fontset, copies the .ttf into the skin font dir if needed,
    and reloads the skin.  Returns True when the skin was reloaded,
    False otherwise.
    """
    try:
        reload_skin = False
        fontxml_paths = self.getFontsXML()
        if fontxml_paths:
            for fontxml_path in fontxml_paths:
                print "analyse du fichier: " + fontxml_path
                if not self.isFontInstalled(fontxml_path, fontname):
                    tree = ET.parse(fontxml_path)
                    root = tree.getroot()
                    print "modification du fichier: " + fontxml_path
                    for sets in root.getchildren():
                        # Adjust the previous last <font>'s tail so the
                        # appended entry keeps the file's indentation.
                        sets.findall(
                            "font")[-1].tail = "\n\t\t"  # "\n\n\t\t"
                        new = ET.SubElement(sets, "font")
                        new.text, new.tail = "\n\t\t\t", "\n\t"
                        subnew1 = ET.SubElement(new, "name")
                        subnew1.text = fontname
                        subnew1.tail = "\n\t\t\t"
                        subnew2 = ET.SubElement(new, "filename")
                        # The "Arial" fontset always maps to Arial.ttf.
                        subnew2.text = (
                            filename, "Arial.ttf")[sets.attrib.get("id") == "Arial"]
                        subnew2.tail = "\n\t\t\t"
                        subnew3 = ET.SubElement(new, "size")
                        subnew3.text = size
                        subnew3.tail = "\n\t\t\t"
                        last_elem = subnew3
                        # Optional <style> only for known style names.
                        if style in [
                                "normal", "bold", "italics", "bolditalics" ]:
                            subnew4 = ET.SubElement(new, "style")
                            subnew4.text = style
                            subnew4.tail = "\n\t\t\t"
                            last_elem = subnew4
                        if aspect:
                            subnew5 = ET.SubElement(new, "aspect")
                            subnew5.text = aspect
                            subnew5.tail = "\n\t\t\t"
                            last_elem = subnew5
                        reload_skin = True
                        # Close the entry: the real last child gets the
                        # shorter tail.
                        last_elem.tail = "\n\t\t"
                    tree.write(fontxml_path)
                    reload_skin = True
    except:
        print_exc()
    if reload_skin:
        # Copy the font file into the skin dir (when it only exists in
        # the script dir), then ask XBMC to reload the skin.
        if not os.path.exists(os.path.join(
                self.skin_font_path, filename)) and os.path.exists(
                os.path.join(self.script_font_path, filename)):
            shutil.copyfile(os.path.join(self.script_font_path, filename),
                            os.path.join(self.skin_font_path, filename))
        xbmc.executebuiltin("XBMC.ReloadSkin()")
        return True
    return False
def importFile(self, inputFilepath):
    """Parse a Spriter project file.

    Builds self.fileDataTable (folderId -> fileId -> file metadata) from
    the <folder> nodes, then imports every <entity> node.  Returns the
    (animationSets, skinFileMap) pair.
    """
    inputDirectory, inputFilename = os.path.split(inputFilepath)
    root = ET.parse(inputFilepath).getroot()
    animationSets = []
    self.fileDataTable = defaultdict(dict)
    self.skinFileMap = {}
    for folderNode in root.findall('folder'):
        folderId = int(folderNode.get('id'))
        folderName = folderNode.get('name')
        basePath = inputDirectory
        for fileNode in folderNode.findall('file'):
            fileId = int(fileNode.get('id'))
            name = fileNode.get('name')
            # Per-file metadata keyed by (folder id, file id).
            self.fileDataTable[folderId][fileId] = dict(
                id=fileId,
                folderId=folderId,
                name=name,
                filename=os.path.join(basePath, name),
                width=int(fileNode.get('width')),
                height=int(fileNode.get('height')),
            )
    for entityNode in root.findall('entity'):
        animationSets.append(self._importEntity(entityNode))
    return animationSets, self.skinFileMap
def __init__(self, f):
    """Load `f`/types.xml and publish one attribute-bag class per
    <node id=...> entry into the module globals (globals()[id])."""
    # NOTE(review): py2-only idioms here (`file()`, getchildren()); the
    # opened handle is never closed explicitly.
    tree = ET.parse(file(os.path.join(f, 'types.xml'), 'r'))
    self.root = tree.getroot()
    if self.root.tag.strip() != "battlemedia":
        raise TypeError("Not a battlemedia file.")
    for node in self.root:
        id = node.attrib['id']

        # Fresh class per node; `type` records its id.
        # NOTE(review): `weaponpoints = []` is a class attribute and so
        # would be shared across instances -- confirm that is intended.
        class new(object):
            type = id
            weapontype = None
            weaponpoints = []
        for value in node.getchildren():
            tag = value.tag.strip()
            text = value.text.strip()
            if tag in self.paths:
                # Path-like entries are resolved relative to `f`.
                setattr(new, tag, os.path.join(f, text))
            elif len(text) > 0:
                setattr(new, tag, text)
            else:
                # Empty text: keep the raw element for later inspection.
                setattr(new, tag, value)
        tag = node.tag.strip()
        globals()[id] = new
class NSpider(BaseSpider):
    """Scrapy spider whose name, allowed domains and start URLs come
    from ../config.xml, loaded once at class-definition time."""
    # NOTE(review): parsing happens at import time; a missing or broken
    # config.xml makes the whole module fail to import.
    tree = ET.parse(
        os.path.join(os.path.abspath(os.path.dirname(__file__)),
                     '../', 'config.xml'))
    cfg = tree.getroot()
    name = cfg.find('./local/name').text
    allowed_domains = [
        domain.text for domain in cfg.findall('./remote/domain')
    ]
    start_urls = [url.text for url in cfg.findall('./remote/start_url')]

    def parse(self, response):
        """Extract one RomItem per game listed on the page."""
        hxs = HtmlXPathSelector(response)
        platform = hxs.select('//div[@id="heading"]/h1/text()').extract()
        games = hxs.select(
            "//div[@class='listings']/table/tr/td/a/text()").extract()
        urls = hxs.select(
            '//div[@class="listings"]/table/tr/td/a/@href').extract()
        # Rewrite relative "rs" links into absolute "dd/rls" download
        # URLs on the first allowed domain.
        urls = [
            self.allowed_domains[0] + i.replace("rs", "dd/rls")
            for i in urls
        ]
        items = []
        for i, e in enumerate(games):
            item = RomItem()
            item['platform'] = platform
            item['title'] = e
            item['url'] = urls[i]
            items.append(item)
        return items
def readfp(self, fp):
    """Parse the XML config from the open file object `fp`, keep the
    parsed tree on self._xml, then apply the settings it contains."""
    self._xml = ElementTree.parse(fp)
    self._loadSettings()
def __init__(self, *args, **kwargs):
    """Winners dialog: load saved jackpot winners (sorted best-first)
    and the keymap info XML, tolerating missing or corrupt files."""
    xbmcgui.WindowXMLDialog.__init__(self, *args, **kwargs)
    self._close_game = kwargs["close_game"]
    self.winners = []
    if os.path.isfile(WINNER_JACKPOT):
        try:
            f = open(WINNER_JACKPOT, "rb")
            # Sort unpickled winner rows best-first.
            # NOTE(review): key assumes numeric strings at indices 2/3/4
            # (presumably score-related fields) -- confirm row layout.
            self.winners = sorted(load(f), reverse=True,
                                  key=lambda id: (int(id[4]),
                                                  int(id[4]) / int(id[2]),
                                                  int(id[3]) > int(id[4])))
            f.close()
        except:
            # Any unpickle/sort failure leaves the list empty.
            self.winners = []
            print_exc()
    try:
        tree = open(FILE_KEYMAP_INFOS)
        self.KeymapInfos = ET.parse(tree).getroot()
        tree.close()
    except:
        # Missing/broken keymap file: feature is simply disabled.
        self.KeymapInfos = None
        print_exc()
    self.dialog_is_ok = False
def parse_xml_sections( self ):
    """Split the cached installeur_content.xml into per-category entry
    lists (skins, scripts, plugins, scrapers); on any failure all
    category attributes are reset to None."""
    try:
        elems = ET.parse( open( os.path.join( self.configManager.CACHEDIR, self.srvItemDescripFile ), "r" ) ).getroot()
        self.cat_skins = elems.find( "skins" ).findall( "entry" )
        self.cat_scripts = elems.find( "scripts" ).findall( "entry" )
        #self.cat_scrapers = elems.find( "scrapers" ).findall( "entry" )
        # Plugins are grouped by media type under <plugins>.
        elemsplugins = elems.find( "plugins" )
        self.cat_videoplugin = elemsplugins.find( "videoplugin" ).findall( "entry" )
        self.cat_musicplugin = elemsplugins.find( "musicplugin" ).findall( "entry" )
        self.cat_pictureplugin = elemsplugins.find( "pictureplugin" ).findall( "entry" )
        self.cat_programplugin = elemsplugins.find( "programplugin" ).findall( "entry" )
        # Scrapers are grouped the same way under <scrapers>.
        elemsscrapers = elems.find( "scrapers" )
        self.cat_musicscraper = elemsscrapers.find( "musicscraper" ).findall( "entry" )
        self.cat_videoscraper = elemsscrapers.find( "videoscraper" ).findall( "entry" )
        del elemsplugins
        del elemsscrapers
        del elems
    except:
        # Deliberate catch-all: any parse/layout problem leaves every
        # category empty rather than crashing the installer UI.
        self.cat_skins = None
        self.cat_scripts = None
        self.cat_videoplugin = None
        self.cat_musicplugin = None
        self.cat_pictureplugin = None
        self.cat_programplugin = None
        self.cat_musicscraper = None
        self.cat_videoscraper = None
        print "Error while parsing installeur_content.xml"
        print_exc()
def query(self, type, pair, bbox):
    """Fetch `type` ('node' or 'way') elements matching tag `pair`
    inside `bbox` from the API and wrap them as a GeoJSON-style
    FeatureCollection dict.
    """
    # for example
    # [bbox=-123.51855362314379,37.33815370551646,-121.3212879982315,38.31438176170056]
    # The URL wants the input sequence reordered as indices 1,0,3,2 --
    # presumably a lat/lon axis swap; confirm with the caller.
    wnes = ",".join(str(bbox[i]) for i in (1, 0, 3, 2))
    q = "%s[%s][bbox=%s]" % (type, pair, wnes)
    url = self.endpoint + q
    # print >> sys.stderr, url
    rsp = urllib2.urlopen(url)
    doc = ET.parse(rsp)
    if type == 'node':
        features = self.nodes(doc)
    elif type == 'way':
        features = self.ways(doc)
    else:
        features = []
    return {'type': 'FeatureCollection', 'features': features}
def initialize_kiosks():
    """Seed the Kiosk table from the crawled stations XML; rows that
    already exist raise IntegrityError and are reported and skipped."""
    # parse tree and add to database
    tree = ElementTree.parse(ROOT_DIR + "bicimap/crawler/stations")
    root = tree.getroot()
    for child in root.find("markers").findall("marker"):
        number = child.get("number")
        name = child.get("name")
        address = child.get("address")
        full_address = child.get("fullAddress")
        lat = child.get("lat")
        lng = child.get("lng")
        # Check if it's already in the database
        try:
            kio = Kiosk(
                number=number,
                name=name,
                address=address,
                full_address=full_address,
                lat=lat,
                lng=lng,
                last_updated=datetime.datetime.now(),
                spaces=0,
                bikes=0,
            )
            kio.save()
            print "Added kiosk number: %d" % int(number)
        except IntegrityError, msg:
            # Unique-constraint hit: kiosk already present; log and move on.
            print msg
def makeReactionSet(xml='enzymes_out.xml'):
    """Prepare new Reaction objects from an SBML-like XML file.

    For every <reaction> element, collect the species of its reactants
    and products and build a Reaction; per-reaction failures are printed
    and skipped.  Returns the list of Reaction objects.

    (The original docstring sat after the first statement, making it a
    no-op string literal; it now documents the function properly.)
    """
    pathwaytree = etree.parse(xml).getroot()
    res = []
    # .iter() replaces getiterator(), which was removed from ElementTree.
    for reaction in pathwaytree.iter('reaction'):
        newReactionName = ""
        newReactantList = []
        newProductList = []
        try:
            newReactionName = reaction.attrib['name']
            for reactantlist in reaction.iter('listOfReactants'):
                for reactant in reactantlist:
                    newReactantList.append(reactant.attrib['species'])
            for productlist in reaction.iter('listOfProducts'):
                for product in productlist:
                    newProductList.append(product.attrib['species'])
            r = Reaction(name=newReactionName,
                         enzymeName=newReactionName,
                         listOfProducts=newProductList,
                         listOfReactants=newReactantList)
            res.append(r)
        except Exception as e:
            print(e)
    # makes set of all enzymes in the source file and positions at bottom of screen
    # for iteration & tag == reaction
    # newEnzyme(name, metabolites_in, metabolites_out)
    # Drawable(name='my name')
    for i in res:
        print(i)
    return res
def parse_xml():
    """Parse the RssFeeds.xml file into {set_id: {"rtl": ..., "feed":
    [{"updateinterval": ..., "feed": ...}, ...]}}; returns {} on any
    error."""
    feeds = {}
    try:
        source = open(XML_PATH)
        tree = ET.parse(source)
        source.close()
        for elems in tree.getroot():
            setid = elems.attrib.get("id")
            if not setid:
                continue  # unnamed sets are skipped
            feed = [{"updateinterval": ftag.attrib.get("updateinterval", "30"),
                     "feed": ftag.text}
                    for ftag in elems.findall("feed")]
            feeds[setid] = {"rtl": elems.attrib.get("rtl", "false"),
                            "feed": feed}
        del tree
    except:
        # Deliberate catch-all: a broken feeds file yields no feeds.
        print_exc()
        feeds = {}
    return feeds
def getVersion(self, manifest):
    """Return the MajorVersion attribute of the manifest's root element."""
    if dbg == 'true':
        log.info('SMTH - getVersion() -> xml: ' + str(manifest))
    root = ET.parse(manifest).getroot()
    return root.attrib['MajorVersion']
def rip(file, langs):
    """Rip translatable content from `file` into its .helper copy.

    Parses the file (skipping it with a warning on parse errors), makes
    sure tags for all languages exist, assigns IDs, writes the helper
    copy, then strips ripped content/hashes and rewrites the work copy.
    """
    try:
        all_document = ET.parse(file)
    except:
        # Bug fix: the filename was never interpolated into the message.
        sys.stderr.write("Error parsing file %s. Skipped\n" % file)
        return
    document = all_document.getroot()
    current_langs = copy(langs)
    #TODO:
    #compare_files (document)
    output_helper_name = os.path.join(work_dir, os.path.dirname(file),
                                      ".helper", os.path.basename(file))
    make_dir(os.path.dirname(output_helper_name))
    # NOTE(review): only the tag creation is assumed to sit under this
    # guard -- the original source was collapsed to one line; confirm.
    if "no-create-tags" not in options:
        create_missing_tags(document, langs)
    create_ids(output_helper_name, document)
    all_document.write(output_helper_name, "utf-8")
    recurse_rip(Stub(), document, langs)
    remove_hashes(document)
    output_file_name = os.path.join(work_dir, file)
    all_document.write(output_file_name, "utf-8")
def loadSkin(self, file):
    """
    This function loads a new skin by reading and parsing the file,
    and then updating the variables in this class.  The skin is
    specified by a file, in the path specified in the class
    documentation.  It only updates the variables here, and doesn't
    make any changes to the GUI.

    NOTE: The size of buttons is exactly the size of the image used
    for the button.
    """
    internalThermoTree = ElementTree.parse(file).getroot().find('internalThermo')
    # Each color attribute is read from the identically-named tag; the
    # loop replaces six copy-pasted assignments.
    for attr in ('day_fgColor', 'day_bgColor', 'day_fadeColor',
                 'night_fgColor', 'night_bgColor', 'night_fadeColor'):
        setattr(self, attr, utility.str2tuple(
            internalThermoTree.findtext(attr),
            "internalThermo." + attr))
    self.fadeFactor = int(internalThermoTree.findtext("fadeFactor"))
def rip(file, langs):
    """Rip translatable content from `file` into its .helper copy,
    then strip ripped content/hashes and rewrite the work copy.

    Files that fail to parse are skipped with a warning on stderr.
    """
    try:
        all_document = ET.parse(file)
    except:
        # Bug fix: the %s placeholder was never filled in.
        sys.stderr.write("Error parsing file %s. Skipped\n" % file)
        return
    document = all_document.getroot()
    current_langs = copy(langs)
    #TODO:
    #compare_files (document)
    output_helper_name = os.path.join(work_dir, os.path.dirname(file),
                                      ".helper", os.path.basename(file))
    make_dir(os.path.dirname(output_helper_name))
    # NOTE(review): guard assumed to cover only create_missing_tags --
    # the original source was collapsed to one line; confirm.
    if "no-create-tags" not in options:
        create_missing_tags(document, langs)
    create_ids(output_helper_name, document)
    all_document.write(output_helper_name, "utf-8")
    recurse_rip(Stub(), document, langs)
    remove_hashes(document)
    output_file_name = os.path.join(work_dir, file)
    all_document.write(output_file_name, "utf-8")
def read_config_xml(b, CONFIG_FILE):
    """
    Read the config.xml file and fill shm in PSRFITS format
    http://www.atnf.csiro.au/research/pulsar/psrfits/fitsdef.html

    `b` is the status-buffer object: b.update(key, value) stores one
    header value, b.keys()/b[...] read values back.
    """
    doc = ET.parse(CONFIG_FILE)
    # .iter() replaces getiterator(); also avoids shadowing builtin `iter`.
    for element in doc.iter():
        tag, text = element.tag, element.text
        if 'telescope' == tag:
            b.update("TELESCOP", text)
        if 'proj_id' == tag:
            b.update("PROJID", text)
        if 'observer' == tag:
            b.update("OBSERVER", text)
        if 'ra_str' == tag:
            b.update("RA_STR", text)
        if 'dec_str' == tag:
            b.update("DEC_STR", text)
        if 'front_end' == tag:
            # Bug fix: was element.tag, which stored the literal string
            # 'front_end' instead of the configured receiver name.
            b.update("FRONTEND", text)
        if 'fd_poln' == tag:
            b.update("FD_POLN", text)
        if 'equinox' == tag:
            b.update("EQUINOX", float(text))
        if 'obs_mode' == tag:
            b.update("OBS_MODE", text)
        if 'obsfreq' == tag:
            b.update("OBSFREQ", float(text))
        if 'obsbw' == tag:
            b.update("OBSBW", float(text))
        if 'totnchan' == tag:
            b.update("TOTNCHAN", int(text))
        if 'n_ds' == tag:
            b.update("N_DS", int(text))
        if 'n_gpu' == tag:
            b.update("N_GPU", int(text))
        if 'n_bins' == tag:
            b.update("NBIN", int(text))
        if 't_dump' == tag:
            b.update("TFOLD", float(text))
        if 'scan' == tag:
            b.update("SCANNUM", int(text))
        if 'scanlen' == tag:
            b.update("SCANLEN", float(text))
        if 'single_pulse' == tag:
            b.update("SNGPULSE", text)
        if 'nb_psr' == tag:
            b.update("NB_PSR", int(text))
        # TODO : check MAX PSR
        # Per-pulsar fields only resolve once nb_psr has been seen, so
        # document order matters here.
        if 'NB_PSR' in b.keys():
            for i in range(b['NB_PSR']):
                if 'psr_name_%d' % i == tag:
                    b.update("SRC_NAME%d" % i, text)
                if 'chan_dm_%d' % i == tag:
                    b.update("DM%d" % i, float(text))
def load_from_xml(self, context):
    """Resolve this IP against the OCLC resolver registry.

    On success, sets institution/base_url/icon_url/link_text from the
    registry record; on ANY failure, falls back to the OCLC "Find in a
    Library" defaults.
    """
    url = '%s?IP=%s' % (self.LOOKUP_URL, self.ip_address)
    try:
        fp = urllib.urlopen(url)
        self.record_xml = fp.read()
        fp.close()
        # NOTE: Is it safe to pickle an ElementTree tree?
        # Don't bind to self if not.
        self.tree = etree.parse(StringIO(self.record_xml))
        records = self.tree.findall('.//{%s}resolverRegistryEntry' % self.NS)
        if not records:
            # Bug fix: `raise 'InvalidIPAddress'` raised a string, which
            # is illegal since Python 2.6; a real exception still lands
            # in the fallback branch below.
            raise ValueError('InvalidIPAddress')
        for attr, studly in [
                ('institution', 'institutionName'),
                ('base_url', 'baseURL'),
                ('icon_url', 'linkIcon'),
                ('link_text', 'linkText')]:
            try:
                setattr(self, attr, self.tree.findall(
                    './/{%s}%s' % (self.NS, studly))[0].text)
            except:
                setattr(self, attr, '')
    except:
        # Assign default OCLC "Find in a Library" values as useful fallback
        self.institution = 'OCLC'
        self.base_url = 'http://worldcatlibraries.org/registry/gateway'
    # NOTE(review): these trailing defaults are assumed to apply on both
    # paths -- the original source was collapsed to one line; confirm
    # they were not nested inside the except block.
    if not self.link_text:
        self.link_text = 'Find in a library'
    if not self.icon_url:
        # Note: without a context
        self.icon_url = '%s/images/findinalibrary_badge.gif' % \
            context.config.site_base_url
def getData(earth=False): filename = "co32_d00_shp" + ["-72", ""][earth] + "/co32_d00.gpx" print "Reading %s..." % filename xmlRoot = ET.parse(filename) counties = {} for xmlCounty in xmlRoot.getiterator("rte"): name = xmlCounty.findtext("name").strip() number = xmlCounty.findtext("number").strip() # print '%s County' % name ## Correct error in census data for Wentworth's Location # if( name == "Wentworth" and number == '9' ): # name = "Wentworth's Location" if name not in counties: county = {"name": name, "shapes": [], "largest": [0, 0]} counties[name] = county points = [] def attr(pt, key): return float(pt.attrib[key].strip()) for xmlPoint in xmlCounty.getiterator("rtept"): points.append([attr(xmlPoint, "lat"), attr(xmlPoint, "lon")]) county = counties[name] county["shapes"].append(polyInfo(points)) return {"state": {}, "counties": counties}
def write(any_list, mode): try: import elementtree.ElementTree as et except: print "Please Run dep_install first" # XML path variable path = get_xml_path() # Code Retriving try: tree = et.parse(path) root = tree.getroot() except: print ".data.xml not found, exiting" exit() # Writing it to xml again according to mode if mode == 1: # Write it in <apps></apps> for apps in root: if apps.tag == 'apps': for i in range(len(any_list)): # I Am assuming that length of any_list and <gen></gen> is same = 5 apps[i].text=any_list[i] else: # Write it in <files></files> for files in root: if files.tag == 'files': for i in range(len(any_list)): files[i].text=any_list[i] tree.write(path)
def create_solr_dict(self, file, data=None):
    """Flatten a proposal XML document into a dict for Solr indexing.

    Project summary/description section contents are captured under
    their dedicated tags, Proposal_Status is copied through, and any
    extra `data` key/values are merged into the result.
    """
    import elementtree.ElementTree as ET
    result = {}
    tree = ET.parse(file)
    root = tree.getroot()
    summary = False
    # Structural wrapper tags carry no indexable content of their own.
    skip_tags = (xml.DOC, xml.ADD, xml.DOCS, "DOCUMENT")
    for ch in root.getiterator():
        if ch.tag in skip_tags:
            continue
        if ch.tag == "Section_Title" and ch.text in (xml.PROJ_DESC, xml.PROJ_SUM):
            # Remember which section the next DRECONTENT belongs to.
            summary = True
            tag_title = xml.SUM_TAG if ch.text == xml.PROJ_SUM else xml.DESC_TAG
        elif summary:  # `== True` anti-idiom removed
            if ch.tag == "DRECONTENT":
                result[tag_title] = ch.text
                summary = False
        elif ch.tag == "Proposal_Status":  # rest of tags
            result[ch.tag] = ch.text
    if data:
        for (key, val) in data.iteritems():
            result[key] = val
    return result
def contexts(self, triple=None):
    """
    Iterates over results to "SELECT ?NAME { GRAPH ?NAME { ?s ?p ?o } }"
    or "SELECT ?NAME { GRAPH ?NAME {} }" if triple is `None`.

    Returns instances of this store with the SPARQL wrapper object
    updated via addNamedGraph(?NAME).

    This causes a named-graph-uri key / value pair to be sent over
    the protocol.

    Please note that some SPARQL endpoints are not able to find empty
    named graphs.
    """
    self.resetQuery()
    if triple:
        subj, pred, obj = triple
        # Unbound positions fall back to the matching SPARQL variable.
        terms = tuple((t if t else Variable(v)).n3()
                      for t, v in ((subj, 's'), (pred, 'p'), (obj, 'o')))
        self.setQuery('SELECT ?name WHERE { GRAPH ?name { %s %s %s }}'
                      % terms)
    else:
        self.setQuery('SELECT ?name WHERE { GRAPH ?name {} }')
    doc = ElementTree.parse(SPARQLWrapper.query(self).response)
    return (
        rt.get(Variable("name"))
        for rt, vars in TraverseSPARQLResultDOM(doc, asDictionary=True))
def contexts(self, triple=None):
    """
    Iterates over results to
        SELECT ?NAME { GRAPH ?NAME { ?s ?p ?o } }
    returning instances of this store with the SPARQL wrapper object
    updated via addNamedGraph(?NAME).

    This causes a named-graph-uri key / value pair to be sent over
    the protocol.
    """
    if triple:
        s, p, o = triple
    else:
        s = p = o = None
    # Unbound positions fall back to the corresponding SPARQL variable.
    params = tuple((term if term else Variable(name)).n3()
                   for term, name in ((s, 's'), (p, 'p'), (o, 'o')))
    self.setQuery('SELECT ?name WHERE { GRAPH ?name { %s %s %s }}' % params)
    doc = ElementTree.parse(SPARQLWrapper.query(self).response)
    return (
        rt.get(Variable("name"))
        for rt, vars in TraverseSPARQLResultDOM(doc, asDictionary=True))
def get_subthemes(theme_id): """ recupere les sous-themes du theme fourni et renvoi un dictionnaire : { subthemeID : titre , ... } """ print "get_subthemes called ..." #print theme_id elems = ET.parse(urllib.urlopen(videoplayerURL + ThemesPage)).getroot() #print ET.tostring(elems) subthemes = [] for theme in elems.find("THEMATIQUES").findall("THEMATIQUE"): #print ET.tostring(theme) try: cur_id = int(theme.findtext("ID")) if int(cur_id) == int(theme_id): for subtheme in theme.find("SELECTIONS").findall("SELECTION"): subtheme_id = int(subtheme.findtext("ID")) subtheme_nom = subtheme.findtext("NOM") subthemes.append([subtheme_id, subtheme_nom.strip()]) break except: print "get_subthemes - Erreur durant le parcorus des themes" traceback.print_exc() #print 'subthemes' #print subthemes return subthemes
def parseCldrFile(filename, outputDirectory=None):
    """
    Parse a CLDR locale XML file and build the corresponding JavaScript
    locale bundle.

    If outputDirectory is given, write the code to
    <outputDirectory>/<locale>.js; otherwise return the code as a string.
    """
    tree = ElementTree.parse(filename)
    language, territory = getLocale(tree)

    data = {}
    for cal in tree.findall('dates/calendars/calendar'):
        # Only the gregorian calendar is supported; attrib.get avoids the
        # deprecated has_key() and treats a missing type the same way.
        if cal.attrib.get("type") != "gregorian":
            continue
        data.update(extractMonth(cal))
        data.update(extractDay(cal))
        #data.update(extractQuarter(cal))
        data.update(extractAmPm(cal))
        data.update(extractDateFormat(cal))
        data.update(extractTimeFormat(cal))
        data.update(extractDateTimeFormat(cal))
        data.update(extractFields(cal))
    data.update(extractDelimiter(tree))
    data.update(extractNumber(tree))

    locale = language
    if territory != "":
        locale += "_" + territory

    code = getJavaScript(data, locale, language, territory)
    if outputDirectory is not None:
        outfile = os.path.join(outputDirectory, locale + ".js")
        # Close the handle deterministically (the original leaked it).
        with open(outfile, "w") as out:
            out.write(code)
    else:
        return code
def doRequest(self, requesterFunc, extractor): template = self.BaseTemplate.copy() timestamp = time.strftime("%Y-%m-%dT%H:%M:%S.000Z", time.gmtime()) requesterFunc(template) template["Timestamp"] = timestamp canonicalQuery = self.makeCanonicalQuery(template) print canonicalQuery stringToSign = "GET\n%s\n%s\n%s" % (self.host, self.onca, canonicalQuery) signature = self.sign(self.SecretAccessKey, stringToSign) signedURL = "http://%s%s?%s&Signature=%s" % (self.host, self.onca, canonicalQuery, signature) f = urllib.urlopen(signedURL) tree = ET.parse(f) # ET.tostring(tree.getroot()) f.close() try: return extractor(tree) except TypeError: nodes = tree.getroot().findall(extractor) a = [] for node in nodes: data = {} for elt in node: tag_name = elt.tag val = elt.text if tag_name.startswith(self.AWS_NS): tag_name = tag_name[len(self.AWS_NS):] data[tag_name] = elt.text a.append(data) return a
def loadConfig(req): """Set globals and load config values from external XML file""" result = {} logging.basicConfig( level=logging.DEBUG, format="%(asctime)s %(levelname)s %(message)s", filename="/var/log/mod/update.log", filemode="w", ) # define a Handler which writes INFO messages or higher to the sys.stderr console = logging.StreamHandler() console.setLevel(logging.INFO) # set a format which is simpler for console use formatter = logging.Formatter("%(name)-12s: %(levelname)-8s %(message)s") # tell the handler to use this format console.setFormatter(formatter) # add the handler to the root logger logging.getLogger("").addHandler(console) options = req.get_options() # options = apache.main_server.get_options() # mod_python 3.3 apparently config = options["MODFILMS_CONFIG"] try: root = ElementTree.parse(config) except OSError, e: logging.error("Config error: " + e) logging.error("Check MODFILMS_CONFIG environment variable is set")
def main():
    """
    Merge screensaver settings (taken from sys.argv[2]) into Kodi's
    advancedsettings.xml, creating the file from the bundled template on
    first run, then offer to restart the application.
    """
    advset = 'special://userdata/advancedsettings.xml'
    source = os.path.join(rootdir, 'resources', 'advancedsettings.xml')
    try:
        if os.path.isfile(advset):
            settings = get_screensaver_settings(sys.argv[2])
            tree = ET.parse(advset)
            root = tree.getroot()
            # Update an existing <screensaver> element, otherwise insert
            # a new one.
            if tree.findtext('screensaver'):
                newroot = update_screensaver_settings(root, settings)
            else:
                newroot = insert_screensaver_settings(root, settings)
            # wrap it in an ElementTree instance, and save as XML
            tree = ET.ElementTree(newroot)
            tree.write(advset, ENCODING)
        else:
            # First run: seed advancedsettings.xml from the add-on's
            # template, then recurse to take the update path above.
            # NOTE(review): after the recursive call returns, execution
            # falls through to the dialog below as well, so the restart
            # prompt appears to be shown twice on first run -- confirm
            # whether that is intended.
            copy_advanced_settings(source, advset)
            main()
        #xbmc.executebuiltin('XBMC.ReloadSkin()')
        #xbmcgui.Dialog().ok( _(30013), _(30014),slideshowpath)
        if xbmcgui.Dialog().yesno(_(30013), _(30014), _(30015)):
            xbmc.executebuiltin('XBMC.RestartApp')
    except:
        # NOTE(review): bare except silently swallows every error
        # (including KeyboardInterrupt) -- consider narrowing/logging.
        return
def youtube(request):
    """Staff-only view listing the configured YouTube user's uploads."""
    if not request.user.is_staff:
        raise Http404
    import elementtree.ElementTree as ET

    # Fall back to a placeholder user when the setting is absent, and
    # flag that the template should prompt for configuration.
    try:
        user = settings.YOU_TUBE_USER
        needs_user_setting = False
    except AttributeError:
        user = "******"
        needs_user_setting = True

    gdata_feed = (
        "http://gdata.youtube.com/feeds/videos?author=%s&orderby=updated"
        % (user,))
    root = ET.parse(urllib.urlopen(gdata_feed)).getroot()

    ATOM = "{http://www.w3.org/2005/Atom}"
    MEDIA = "{http://search.yahoo.com/mrss/}"

    videos = []
    for entry in root.findall(ATOM + "entry"):
        media = entry.find(MEDIA + "group")
        thumbs = media.findall(MEDIA + "thumbnail")
        published = entry.findtext(ATOM + "published")
        videos.append({
            "title": entry.findtext(ATOM + "title"),
            # Keep only the date part of the Atom timestamp.
            "upload_date": published.split("T")[0],
            "description": media.findtext(MEDIA + "description"),
            "thumb": media.find(MEDIA + "thumbnail").attrib["url"],
            # The last thumbnail in the feed is the largest image.
            "image": thumbs[-1].attrib["url"],
            "url": media.find(MEDIA + "content").attrib["url"],
        })

    return render_to_response(
        "upload/youtube.html",
        {"videos": videos,
         "textarea_id": request.GET["textarea"],
         "needs_user_setting": needs_user_setting},
        context_instance=RequestContext(request))
def get_themes(): """ recupere et retourne une liste des themes et sous themes [ [ themeid , titre , [ [subtheme_id,titre] , ... ] ] , ... ] """ print "get_themes called ..." elems = ET.parse(urllib.urlopen(videoplayerURL + ThemesPage)).getroot() #print ET.tostring(elems) themes = [] for theme in elems.find("THEMATIQUES").findall("THEMATIQUE"): #print ET.tostring(theme) try: theme_id = int(theme.findtext("ID")) theme_nom = theme.findtext("NOM") theme_color = theme.findtext("COULEUR").replace("#", "") themes.append([theme_id, theme_nom.strip(), theme_color]) except: print "get_themes- Erreur durant le parcorus des themes" traceback.print_exc() #print 'themes' #print themes return themes
def __init__(self,file=None,level=soya.World,verbose=False,volume_class=soya.Volume,elementtree=None): self.soyaVolume=volume_class self.infile=file if file: print "Reading:",file self.tree=ElementTree.parse(file) elif elementtree: self.tree=elementtree self.root=root=self.tree.getroot() ElementInclude.include(root) self.level=level() self.level_static=soya.World(self.level) self.materials={} self.verbose=verbose iter=root.getchildren() for element in iter: if verbose: print "Main Element:", element.tag, try: self.handle_element(element) if verbose: print "ok" except: raise LevelError('main error handling %s' % element.tag) self.done()
def wavelength_plist(dest_plist, base_plist, wavelength): """ specific-wavelength plist from a base-plist """ tree = ET.parse(base_plist) after_key = False for elt in tree.getiterator(): if after_key: # wavelength "value" entry: if (elt.tag != 'real'): print 'elt.tag = %s' % elt.tag raise RuntimeError, '\"wavelength\" key with value tag not \"real\"' elt.text = '%f' % wavelength after_key = False elif (elt.text == 'wavelength'): # wavelength "key" entry: after_key = True out_fp = open(dest_plist, 'wt') # need to _explicitely_ write the header lines to the output file (as elementtree ignores them) print >> out_fp, '<?xml version="1.0" encoding="UTF-8"?>' print >> out_fp, '<!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">' tree.write(out_fp) out_fp.close()
def contexts(self, triple=None):
    """
    Iterates over results to "SELECT ?NAME { GRAPH ?NAME { ?s ?p ?o } }"
    or "SELECT ?NAME { GRAPH ?NAME {} }" if triple is `None`.

    Returns instances of this store with the SPARQL wrapper
    object updated via addNamedGraph(?NAME).

    This causes a named-graph-uri key / value pair to be sent over
    the protocol.

    Please note that some SPARQL endpoints are not able to find empty named
    graphs.
    """
    self.resetQuery()

    if triple:
        # Use the caller's terms where bound; otherwise substitute fresh
        # SPARQL variables so the pattern matches any triple.
        s, p, o = triple
        params = ((s if s else Variable('s')).n3(),
                  (p if p else Variable('p')).n3(),
                  (o if o else Variable('o')).n3())
        self.setQuery('SELECT ?name WHERE { GRAPH ?name { %s %s %s }}' % params)
    else:
        # Empty graph pattern: list all named graphs, including empty
        # ones (where the endpoint supports that).
        self.setQuery('SELECT ?name WHERE { GRAPH ?name {} }')

    doc = ElementTree.parse(SPARQLWrapper.query(self).response)
    # Lazily yield the graph name bound to ?name in each result row.
    return (
        rt.get(Variable("name"))
        for rt, vars in _traverse_sparql_result_dom(
            doc, as_dictionary=True,
            node_from_result=self.node_from_result)
    )
def get_episode_by_airdate(self, show_id, aired): """Get the episode object matching this episode_id.""" # url = "%s/series/%s/default/%s/%s" % (self.base_key_url, show_id, season_num, ep_num) """http://www.thetvdb.com/api/GetEpisodeByAirDate.php?apikey=1D62F2F90030C444&seriesid=71256&airdate=2010-03-29""" url = "%s/GetEpisodeByAirDate.php?apikey=1D62F2F90030C444&seriesid=%s&airdate=%s" % ( self.base_url, show_id, aired, ) data = urllib.urlopen(url) print url episode = None # code to check if data has been returned temp_data = str(data.read()) print "data returned from TheTVDB " + temp_data if temp_data.startswith("<?xml") == False: print "No data returned ", temp_data return episode data = urllib.urlopen(url) try: tree = ET.parse(data) episode_node = tree.find("Episode") episode = TheTVDB.Episode(episode_node, self.mirror_url) except SyntaxError: pass return episode