def onRefreshClick(self, widget):
    """Refresh the game list.

    Scans SteamApps for installed appmanifest files, fetches the user's
    owned games from the Steam community XML feed, and fills
    self.game_liststore with (installed, appid, name) rows.
    """
    if not self.steamid.get_text():
        return

    # Appids of locally installed games, from appmanifest_<id>.acf files.
    appids = []
    for fname in listdir(SteamApps):
        if not isfile(join(SteamApps, fname)):
            continue
        # Fix: escape the dot (it used to match any character) and use
        # group(1) -- the old m.groups(1)[0] worked only by accident.
        m = re.search(r"appmanifest_([0-9]+)\.acf", fname)
        if m:
            appids.append(int(m.group(1)))

    # Get user's id: resolve the vanity name to a 64-bit steam id.
    url_id64 = "http://steamcommunity.com/id/" + self.steamid.get_text() + "/games?tab=all&xml=1"
    html = urlopen(url_id64)
    tree = ElementTree()
    tree.parse(html)
    steamID64 = tree.getiterator('steamID64')[0].text

    # Get game data.  NOTE(review): steamID64 is sent as the API "key"
    # parameter here, which looks wrong but is preserved from the original.
    url = "https://api.steampowered.com/ISteamApps/GetAppList/v0002/?key=" + steamID64 + "&format=xml"
    html = urlopen(url)
    tree = ElementTree()
    tree.parse(html)
    for game in tree.getiterator('app'):
        appid = int(game.find('appid').text)
        name = game.find('name').text
        exists = appid in appids
        self.game_liststore.append([exists, appid, name])
def _parseDetectiveFile(self, file):
    """Parse a Detective XML file and populate the lookup tables.

    Fills in:
      self.insert2seqs   -- insert ID -> (mate1 name, mate2 name, library MIN, library MAX)
      self.seqname2seqid -- sequence NAME -> sequence ID
      self.contig2length -- contig id -> LEN attribute (kept as a string)
      self.seq2contigloc -- sequence ID -> (contig id, "+"/"-", ASM_LEND, ASM_REND)
    """
    tree = ElementTree()
    tree.parse(file)
    for library in tree.getiterator(tag="LIBRARY"):
        for insert in library.getiterator(tag="INSERT"):
            # NOTE(review): assumes every INSERT holds exactly two SEQUENCE
            # children (a mate pair) -- seqs[1] raises IndexError otherwise.
            seqs = list(insert.getiterator(tag="SEQUENCE"))
            self.insert2seqs[insert.attrib["ID"]] = \
                (seqs[0].attrib["NAME"], \
                 seqs[1].attrib["NAME"], \
                 library.attrib["MIN"], \
                 library.attrib["MAX"])
            self.seqname2seqid[ seqs[0].attrib["NAME"] ] = seqs[0].attrib["ID"]
            self.seqname2seqid[ seqs[1].attrib["NAME"] ] = seqs[1].attrib["ID"]
    for contig in tree.getiterator(tag="CONTIG"):
        contigId = self._toContigId(contig.attrib["ID"])
        self.contig2length[ contigId ] = contig.attrib["LEN"]
        for seq in contig.getiterator(tag="SEQUENCE"):
            # Orientation "BE" maps to "+", anything else to "-".
            self.seq2contigloc[ seq.attrib["ID"] ] = \
                (contigId, \
                 "+" if seq.attrib["ORI"] == "BE" else "-", \
                 seq.attrib["ASM_LEND"], seq.attrib["ASM_REND"])
def check_platespin_connectivity ():
    """Check connectivity to the PlateSpin server.

    Scrapes the product name/version from the server landing page, then
    looks up the id of PLATESPIN_SERVER_NETWORK and stores it in the
    PLATESPIN_NETWORK_ID global.  Exits the process when no connection
    can be established.
    """
    global PLATESPIN_NETWORK_ID
    got_connectivity = False
    print "[*] PlateSpin URL is %s" % PLATESPIN_SERVER_URL
    try:
        # Retrieve server product version
        tree = ElementTree ()
        tree.parse (urllib2.urlopen (PLATESPIN_SERVER_URL))
        # NOTE(review): assumes the first <h1> is the product name and the
        # first <span> carries the version text -- fragile HTML scraping.
        product_name = tree.getiterator ('h1')[0].text
        product_version = xml.etree.ElementTree.tostring (tree.getiterator ('span')[0] ).replace (' ', '').replace('\n','').rsplit ('>', 1)[1]
        # Retrieve Network ID
        tree = ElementTree ()
        tree.parse (urllib2.urlopen (PLATESPIN_SERVER_URL + "/Network.asmx/GetNetworks"))
        for network in tree.getiterator ('{%s}Network' % PLATESPIN_XML_NS):
            if network.find ('{%s}name' % PLATESPIN_XML_NS).text == PLATESPIN_SERVER_NETWORK:
                PLATESPIN_NETWORK_ID = network.find ('{%s}id' % PLATESPIN_XML_NS).text
                got_connectivity = True
                break
    except urllib2.HTTPError as herror:
        print "[E] Unable to open %s: %s (HTTP %s)." % (PLATESPIN_SERVER_URL , herror.msg, herror.code)
        if herror.code == 401:
            print "[E] Is Basic Authentication enabled in IIS?"
    except urllib2.URLError as uerror:
        print "[E] Unable to open %s: %s." % (PLATESPIN_SERVER_URL, uerror.reason)
    if not got_connectivity:
        print "[E] Unable to connect to the PlateSpin Server."
        sys.exit (1)
    print "[*] Connected to %s v.%s." % (product_name, product_version)
    print "[*] Network ID for %s is %s." % (PLATESPIN_SERVER_NETWORK, PLATESPIN_NETWORK_ID)
def parse_osm_file(osm_file):
    """Parse an OSM XML file.

    Returns (nodes, ways, nodes_ways, ways_ways, node_connections, pubs):
    node/way attribute dicts keyed by id, node->ways membership, node
    adjacency, and the elements that look like pubs.
    """
    nodes = {}
    ways = {}
    nodes_ways = defaultdict(list)
    ways_ways = defaultdict(dict)
    node_connections = defaultdict(set)
    pubs = []

    tree = ElementTree()
    tree.parse(osm_file)

    with print_status("Getting nodes... "):
        for node_elem in tree.getiterator("node"):
            # Work directly on the element's attrib dict (and store it).
            attrs = node_elem.attrib
            attrs['lat'] = float(attrs['lat'])
            attrs['lon'] = float(attrs['lon'])
            attrs['id'] = int(attrs['id'])
            attrs['tags'] = dict(
                (t.attrib['k'], t.attrib['v']) for t in node_elem.findall("tag"))
            nodes[int(attrs['id'])] = attrs
            if is_pub(attrs):
                pubs.append(attrs)

    with print_status("Getting ways... "):
        for way_elem in tree.getiterator("way"):
            attrs = way_elem.attrib
            attrs['id'] = int(attrs['id'])
            attrs['tags'] = dict(
                (t.attrib['k'], t.attrib['v']) for t in way_elem.findall("tag"))
            refs = [int(nd.attrib['ref']) for nd in way_elem.findall("nd")]
            for ref in refs:
                nodes_ways[ref].append(attrs['id'])
            attrs['nodes'] = [nodes[r] for r in refs]
            connect_nodes_in_way(attrs, node_connections)
            ways[attrs['id']] = attrs
            # A pub tagged as a building: approximate its position with the
            # average of its nodes' coordinates.
            if is_pub(attrs):
                lat_total, lon_total = 0, 0
                for member in attrs['nodes']:
                    lat_total += member['lat']
                    lon_total += member['lon']
                pub = deepcopy(attrs)
                pub['lat'] = lat_total / len(attrs['nodes'])
                pub['lon'] = lon_total / len(attrs['nodes'])
                pubs.append(pub)

    return nodes, ways, nodes_ways, ways_ways, node_connections, pubs
def parse_osm_file(osm_file):
    """ Given an OSM filename ``osm_file`` """
    # nodes: node id -> attrib dict; ways: way id -> attrib dict
    nodes = {}
    ways = {}
    nodes_ways = defaultdict(list)   # node id -> list of way ids that use it
    ways_ways = defaultdict(dict)    # NOTE(review): never populated here
    node_connections = defaultdict(set)
    pubs = []
    tree = ElementTree()
    tree.parse(osm_file)
    with print_status("Getting nodes... "):
        for node in tree.getiterator("node"):
            # n aliases the element's attrib dict and is mutated in place.
            n = node.attrib
            n['lat'] = float(n['lat'])
            n['lon'] = float(n['lon'])
            n['id'] = int(n['id'])
            n['tags'] = dict((x.attrib['k'], x.attrib['v']) for x in node.findall("tag"))
            nodes[int(n['id'])] = n
            if is_pub(n):
                pubs.append(n)
    with print_status("Getting ways... "):
        for way in tree.getiterator("way"):
            w = way.attrib
            w['id'] = int(w['id'])
            w['tags'] = dict((x.attrib['k'], x.attrib['v']) for x in way.findall("tag"))
            nds = [int(x.attrib['ref']) for x in way.findall("nd")]
            for nd in nds:
                nodes_ways[nd].append(w['id'])
            w['nodes'] = [nodes[x] for x in nds]
            connect_nodes_in_way(w, node_connections)
            ways[w['id']] = w
            # a pub tagged as a building
            # for position just use avg of nodes
            if is_pub(w):
                sum_lat, sum_lon = 0, 0
                for n in w['nodes']:
                    sum_lat += n['lat']
                    sum_lon += n['lon']
                new_w = deepcopy(w)
                new_w['lat'] = sum_lat / len(w['nodes'])
                new_w['lon'] = sum_lon / len(w['nodes'])
                pubs.append(new_w)
    return nodes, ways, nodes_ways, ways_ways, node_connections, pubs
def ParseReaderFile(fname):
    """Parse a plate-reader XML export.

    Returns {reading label: {start time (unix seconds): {(row, col): value}}}.
    "OVER" single readings are stored as None.
    """
    xml_reader = ElementTree()
    xml_reader.parse(fname)

    plate_values = {}
    reading_label = None
    time_in_sec = None
    well = (0, 0)
    measurement = None

    for elem in xml_reader.getiterator():
        tag = elem.tag
        if tag == 'Section':
            # New reading section: remember its label and parse the start
            # timestamp (first 19 characters, module-level format fmt).
            reading_label = elem.attrib['Name']
            stamp = elem.attrib['Time_Start'][:19]
            time_in_sec = calendar.timegm(time.strptime(stamp, fmt))
            plate_values[reading_label] = {time_in_sec: {}}
        elif tag == 'Well':
            # Positions look like "B12": row letter plus 1-based column.
            pos = elem.attrib['Pos']
            well = (ord(pos[0]) - ord('A'), int(pos[1:]) - 1)
        elif tag == 'Multiple':
            if elem.attrib['MRW_Position'] == 'Mean':
                measurement = elem.text
                plate_values[reading_label][time_in_sec][well] = float(measurement)
        elif tag == 'Single':
            measurement = elem.text
            if measurement == "OVER":
                plate_values[reading_label][time_in_sec][well] = None
            else:
                plate_values[reading_label][time_in_sec][well] = float(measurement)
    return plate_values
def install_drp_flag(self):
    """Ensure the DRP flag virtual disk is attached to the domain.

    Creates an empty flag file under pathvar if needed, then adds a
    virtio file-backed <disk> entry to the domain XML config unless one
    already points at the flag file.  Raises ex.excError when the config
    cannot be parsed.
    """
    flag_disk_path = os.path.join(rcEnv.paths.pathvar, 'drp_flag.vdisk')
    from xml.etree.ElementTree import ElementTree, SubElement
    tree = ElementTree()
    try:
        tree.parse(self.cf)
    except Exception as exc:
        raise ex.excError("container config parsing error: %s" % exc)
    # create the vdisk if it does not exist yet
    if not os.path.exists(flag_disk_path):
        with open(flag_disk_path, 'w') as f:
            f.write('')
            f.close()  # redundant: the with block already closes f
    # check if drp flag is already set up
    for disk in tree.getiterator("disk"):
        e = disk.find('source')
        if e is None:
            continue
        # First (name, value) attribute pair of <source>; the value is the
        # backing path whichever attribute name the disk type uses.
        (dev, path) = e.items()[0]
        if path == flag_disk_path:
            self.log.info("flag virtual disk already exists")
            return
    # add vdisk to the vm xml config
    self.log.info("install drp flag virtual disk")
    devices = tree.find("devices")
    e = SubElement(devices, "disk", {'device': 'disk', 'type': 'file'})
    SubElement(e, "driver", {'name': 'qemu'})
    SubElement(e, "source", {'file': flag_disk_path})
    SubElement(e, "target", {'bus': 'virtio', 'dev': 'vdosvc'})
    tree.write(self.cf)
def guess_location(in_xml, in_fasta, write_out = False):
    """Tries to guess the genomic location of a fragment."""
    name, seq = fasta_iter(in_fasta).next()
    # A name already carrying location info ("name|start|stop") is left alone.
    if len(name.split('|')) == 3:
        return

    seq_len = len(seq)
    starts = []
    try:
        tree = ElementTree(file = in_xml)
        for iteration in tree.getiterator('Iteration'):
            hit_from = iteration.find('Iteration_hits/Hit/Hit_hsps/Hsp/Hsp_hit-from')
            query_def = iteration.find('Iteration_query-def')
            if hit_from is not None:
                # Query names embed their own offset as "<prefix>_<offset>".
                offset = int(query_def.text.split('_')[1])
                starts.append(int(hit_from.text) - offset)
    except ExpatError:
        return None

    if not starts:
        return
    # Average the candidate starts, clamped to 1.
    start = max(sum(starts) / len(starts), 1)
    if write_out:
        with open(in_fasta, 'w') as handle:
            handle.write('>%s|%i|%i\n%s\n' % (name, start, start + seq_len, seq))
    return start
def importOETerms():
    """Create a list of openEHR Terms to add to the term server."""
    termslist = []
    # Parse the terminology XML and inspect every element for <group>
    # children.
    doc = ElementTree(file=OPENEHR_TERMINOLOGY_FILE)
    for element in doc.getiterator():
        for group in element.findall('group'):
            # The first attribute value of <group> is its name.
            grpname = unicode(group.items()[0][1], 'utf-8')
            for concept in group.findall('concept'):
                attrs = concept.items()
                conceptId = unicode(attrs[0][1], 'utf-8')
                conceptRubric = unicode(attrs[1][1], 'utf-8')
                # grpname + conceptId is required as a key because some
                # conceptIds are repeated in different groups.
                termslist.append((grpname + ' ' + conceptId,
                                  openEHRTerminology(grpname, conceptId, conceptRubric)))
    return termslist
def determine_subtype_element(in_file, delete_extra = True): hits = defaultdict(int) try: tree = ElementTree(file = in_file) for it in tree.getiterator('Iteration'): hit_list = it.getiterator('Hit') if len(hit_list) > 0: hit = hit_list[0].find('Hit_def').text hits[hit.split('_')[1]] += 1 if delete_extra: for hit in hit_list[1:]: hit.clear() if delete_extra: tree.write(in_file) except ExpatError: return None count = sum(hits.values()) if count < 5: return None elif all([x < count*0.6 for x in hits.values()]): print 'too heterogenus %s' % ','.join(map(str,hits.items())) return None else: for key, val in hits.items(): if val > count*0.6: return key
def __init__(self):
    # Build a map of ISO 3166 country codes to translated names, then
    # derive the current country from the LANG environment variable.
    self.countries = {}
    fname = "/usr/share/xml/iso-codes/iso_3166.xml"
    if os.path.exists(fname):
        et = ElementTree(file=fname)
        for entry in et.getiterator('iso_3166_entry'):
            attrs = entry.attrib
            # Prefer the colloquial name over the official one.
            if "common_name" in attrs:
                descr = attrs["common_name"]
            else:
                descr = attrs["name"]
            # Two-letter code preferred, three-letter as fallback.
            if "alpha_2_code" in attrs:
                code = attrs["alpha_2_code"]
            else:
                code = attrs["alpha_3_code"]
            self.countries[code] = gettext.dgettext('iso_3166', descr)
    self.country = None
    self.code = None
    # LANG looks like "en_GB.UTF-8": the code sits between "_" and ".".
    locale = os.getenv("LANG", default="en.UK")
    start = locale.find("_")
    end = locale.find(".")
    if end == -1:
        end = len(locale)
    self.code = locale[start + 1:end]
    self.country = self.get_country_name(self.code)
def xmltextnorm(inf, outf):
    """
    Write a normalized version of the text form the data from stream inf
    to the stream at outf, as UTF-8.
    """
    tree = ElementTree()
    parser = makeparser()
    tree.parse(inf, parser=parser)
    # Concatenate the text and tail of every element in document order.
    paras = tree.getiterator(u"*")
    text = u' '.join([((p.text or u'') + (p.tail or u'')) for p in paras])
    # Flatten whitespace: newlines/tabs become spaces, runs are collapsed.
    text = text.replace(u'\n', u' ')
    text = text.replace(u'\t', u' ')
    while u'  ' in text:
        text = text.replace(u'  ', u' ')
    # One line per punctuation-terminated fragment.
    for p in u'.,;:!?':
        text = text.replace(u'%s ' % p, u'%s\n' % p)
    lines = text.split(u'\n')
    outlines = []
    for line in lines:
        outlines.extend(splitline(line))
    # outf may be a path (write a file) or an already-open stream.
    if type(outf) in types.StringTypes:
        with open(outf, 'w') as f:
            f.write(u'\n'.join(outlines).encode('UTF-8'))
    else:
        outf.write(u'\n'.join(outlines + ['']).encode('UTF-8'))
def __init__(self):
    """Build the ISO 3166 code -> country-name map and guess the current
    country from the LANG environment variable."""
    # get a list of country codes and real names
    self.countries = {}
    fname = "/usr/share/xml/iso-codes/iso_3166.xml"
    if os.path.exists(fname):
        et = ElementTree(file=fname)
        it = et.getiterator('iso_3166_entry')
        for elm in it:
            # Fix: dict.has_key() is deprecated (removed in Python 3);
            # use "in", matching the sibling implementation of this class.
            if "common_name" in elm.attrib:
                descr = elm.attrib["common_name"]
            else:
                descr = elm.attrib["name"]
            if "alpha_2_code" in elm.attrib:
                code = elm.attrib["alpha_2_code"]
            else:
                code = elm.attrib["alpha_3_code"]
            self.countries[code] = gettext.dgettext('iso_3166', descr)
    self.country = None
    self.code = None
    # LANG is e.g. "en_GB.UTF-8": the country code sits between "_" and ".".
    locale = os.getenv("LANG", default="en.UK")
    a = locale.find("_")
    z = locale.find(".")
    if z == -1:
        z = len(locale)
    self.code = locale[a+1:z]
    self.country = self.get_country_name(self.code)
def read_xml(in_fname): tree = ElementTree() tree.parse(in_fname) readdata = [] readingSessions = tree.getiterator( LIDC_NS + "readingSession") # Returns list of all readingSessions for reader in readingSessions: # Iterates through all found readingSessions nonnodules = reader.findall(LIDC_NS + "nonNodule") allnodules = reader.getiterator(LIDC_NS + "unblindedReadNodule") nodules_usl = [] nodules_osl = [] for nodule in allnodules: edgemaps = nodule.findall(LIDC_NS + "roi/" + LIDC_NS + "edgeMap") if len(edgemaps) <= 1: #only one edgemap => nodule < 3mm nodules_usl.append(nodule) else: nodules_osl.append(nodule) #now we have 3 lists for: nodules >3mm, nodules<3mm, nonnodules if VERBOSE: for nodule in nodules_usl: print nodule print "------------------------------------------------------" for nodule in nodules_osl: print nodule print "------------------------------------------------------" for nodule in nonnodules: print nodule readdata.append((nodules_osl, nodules_usl, nonnodules)) return readdata
def ParseReaderFile(fname):
    """Parse a plate-reader XML export.

    Returns {reading label: {start time (unix seconds): {(row, col): value}}},
    where "OVER" single readings are stored as None.
    """
    xml_reader = ElementTree()
    xml_reader.parse(fname)
    reading_label = None
    time_in_sec = None
    well = (0, 0)
    measurement = None
    plate_values = {}
    for e in xml_reader.getiterator():
        if e.tag == 'Section':
            # New section: note its label and parse the start timestamp
            # (first 19 characters, format given by the module-level fmt).
            reading_label = e.attrib['Name']
            TIME = e.attrib['Time_Start']
            TIME = TIME[:19]
            TS = time.strptime(TIME, fmt)
            time_in_sec = calendar.timegm(TS)
            plate_values[reading_label] = {}
            plate_values[reading_label][time_in_sec] = {}
        elif e.tag == 'Well':
            # Positions look like "B12": row letter plus 1-based column.
            W = e.attrib['Pos']
            well_row = ord(W[0]) - ord('A')
            well_col = int(W[1:]) - 1
            well = (well_row, well_col)
        elif e.tag == 'Multiple':
            if e.attrib['MRW_Position'] == 'Mean':
                measurement = e.text
                plate_values[reading_label][time_in_sec][well] = float(measurement)
        elif e.tag == 'Single':
            measurement = e.text
            if measurement == "OVER":
                plate_values[reading_label][time_in_sec][well] = None
            else:
                plate_values[reading_label][time_in_sec][well] = float(measurement)
    return plate_values
def checkasdm(dname):
    """ check if input data is ALMA/EVLA SDM

    Returns (isASDM, isEVLA, version) based on the attributes of the
    ASDM.xml file inside dname.
    """
    isASDM=False
    isEVLA=False
    version =''
    if(os.path.exists(dname+'/ASDM.xml')):
        from xml.etree.ElementTree import ElementTree
        rt = ElementTree(file=dname+'/ASDM.xml')
        iter = rt.getiterator()
        # NOTE(review): iter[1] is the second element in document order --
        # assumed to carry the schema attributes; confirm for other layouts.
        for k, n in iter[1].items():
            if n =='ASDM':
                isASDM=True
            if k=='schemaVersion':
                #if int(n) == 1:
                #    ver='2'
                #else:
                #    ver = str(n)
                ver = str(n)
                version='v1.'+ver
            elif k=='entityId':
                # NOTE(review): a non-"evla" entityId resets isASDM too --
                # looks intentional but surprising; verify.
                if n.count('evla'):
                    isEVLA=True
                else:
                    isASDM=False
                    isEVLA=False
    return (isASDM,isEVLA,version)
class RegisterView: def load_definitions(self, defs_file): self.tree = ElementTree() self.tree.parse(defs_file) self.reg_defs = self.tree.getiterator('register') def find_registers(self, reg_name): regs = filter(lambda x: x.attrib['name'].startswith(reg_name), self.reg_defs) return map(lambda x: x.attrib['name'], regs) def get_reg_element(self, reg_name): elems = filter(lambda x: x.attrib['name'] == reg_name, self.reg_defs) if len(elems) > 0: return elems[0] else: return None def extract_bits(self, val, bit_len, bit_offset): return (val >> bit_offset) & ((1<<bit_len) - 1) def get_reg_address(self, name): return eval(self.get_reg_element(name).attrib['address']) def print_reg(self, name, val): print "%s (*0x%08X) = 0x%08X\n" % (name, self.get_reg_address(name), val) reg = self.get_reg_element(name) for field in reg.getchildren(): bit_len = int(field.attrib['bitlength']) bit_offset = int(field.attrib['bitoffset']) bit_name = field.attrib['name'] description = field.attrib.get('description', 'no description') print "%s\t0x%X\t\t%s" % (bit_name, self.extract_bits(val, bit_len, bit_offset), description)
def get_feed(param):
    '''
    Returns a news feed (usage: #news bbc -> bbc news feed).
    Get available feeds with #news (without parameters).
    '''
    keys = NEWS_FEEDS.keys()
    if not param in keys:
        return 'Feed not in list. Loaded feeds: %s' % ', '.join(keys)
    handle = None
    try:
        handle = urllib2.urlopen(NEWS_FEEDS[param])
    except urllib2.HTTPError:
        return 'Did not find feed.'
    rss = ElementTree()
    try:
        rss.parse(handle)
    except ExpatError:
        return 'Malformed XML. This feed sucks! (Clean ATOM or RSS required!)'
    finally:
        # Fix: the handle used to leak when parsing failed; always close it.
        handle.close()
    title = rss.find('channel/title')
    if title is None:  # was "== None"; identity check is the correct idiom
        return 'Did not find any matching tags.'
    # Collect at most NEWS_RESULTS "title: short-url" lines.
    items = ''
    counter = 0
    for item in rss.getiterator('item'):
        if counter >= NEWS_RESULTS:
            break
        counter += 1
        items += '%s: %s\n' % (item.find('title').text.strip(),
                               shorten_url(item.find('link').text))
    # items[:-1] drops the trailing newline of the last entry.
    output = '[%s]\n%s' % (title.text.strip(), items[:-1])
    return output
def checkasdm(dname):
    """ check if input data is ALMA/EVLA SDM

    Returns (isASDM, isEVLA, version).
    """
    is_asdm = False
    is_evla = False
    version = ''
    asdm_xml = dname + '/ASDM.xml'
    if os.path.exists(asdm_xml):
        from xml.etree.ElementTree import ElementTree
        tree = ElementTree(file=asdm_xml)
        elements = tree.getiterator()
        # Inspect the attributes of the second element in document order.
        for key, value in elements[1].items():
            if value == 'ASDM':
                is_asdm = True
            if key == 'schemaVersion':
                version = 'v1.' + str(value)
            elif key == 'entityId':
                # An "evla" marker in the entity id means EVLA data;
                # anything else resets both flags.
                if value.count('evla'):
                    is_evla = True
                else:
                    is_asdm = False
                    is_evla = False
    return (is_asdm, is_evla, version)
def devmapping(self):
    """
    Return a list of (src, dst) devices tuples found in the container
    configuration file.
    """
    if not os.path.exists(self.cf):
        # not yet received from peer node ?
        return []
    data = []
    from xml.etree.ElementTree import ElementTree, SubElement
    tree = ElementTree()
    try:
        tree.parse(self.cf)
    except Exception:
        # best-effort: an unparsable config yields an empty mapping
        return data
    # Fix: iter() replaces getiterator(), which was removed in Python 3.9.
    for dev in tree.iter('disk'):
        s = dev.find('source')
        if s is None:
            continue
        if 'dev' not in s.attrib:
            continue
        src = s.attrib['dev']
        s = dev.find('target')
        if s is None:
            continue
        if 'dev' not in s.attrib:
            continue
        dst = s.attrib['dev']
        data.append((src, dst))
    return data
class Feed(object): def __init__(self, opensearch, raw, remember=False): #print raw.read() self.opensearch = opensearch self.tree = ElementTree() self.tree.parse(raw) self.links = {} for link in self.tree.getiterator('{http://www.w3.org/2005/Atom}link'): if link.attrib.has_key('rel'): self.links[link.attrib['rel']] = link.attrib['href'] self.after_id = None self.next_url = None if self.links.has_key('next'): a = urlparse(self.links['next']) self.next_url = '%s?%s' % (a.path, a.query) if self.links.has_key('after'): a = urlparse(self.links['after']) p = parse_qs(a.query) self.after_id = p.get('after_id', None)[0] if remember: cache = "%s/%s" % (self.opensearch.cache_folder, cache_name(p['q'][0])) mkdir_p(cache) json.dump({'q': p['q'][0], 'after_id' : self.after_id, 'total' : len(self)}, open(cache, 'w+')) def keys(self): for entry in self.tree.getiterator('*'): yield entry.tag def __len__(self): return int(self.tree.find('{http://a9.com/-/spec/opensearch/1.1/}totalResults').text) def __iter__(self): for entry in self.tree.getiterator('{http://www.w3.org/2005/Atom}entry'): yield self.opensearch.entry(entry) if self.next_url != None: items_per_page = int(self.tree.find('{http://a9.com/-/spec/opensearch/1.1/}itemsPerPage').text) t = ElementTree() a = urlparse(self.next_url) p = parse_qs(a.query) p['q'] = p['q'][0] #print "pages:", len(self), 2, int(math.ceil(len(self) / items_per_page))+1 for page in range(2, int(math.ceil(len(self) / items_per_page)) +1): if page % 5 == 0: time.sleep(10) print "pause" p['page'] = page url = '%s?%s' % (a.path, urlencode(p)) t.parse(self.opensearch.raw_query(url)) for entry in t.getiterator('{http://www.w3.org/2005/Atom}entry'): yield self.opensearch.entry(entry)
def parseXMLRequestNew(filename):
    """Parse an XML request file and return the attributes of its root
    element (as built by getAttributes)."""
    root = ElementTree(file=filename)
    # The old code materialized every element via getiterator() just to
    # take element [0] (the root) and kept an unused retDict; getroot()
    # gives the same element directly.
    return getAttributes(root.getroot())
def parse_agency(): try: agencyList = ET(file = "/home/josh/projects/xml_files/agencyList.xml") except ParseError: print "Unable to parse XML from file" for agency in agencyList.getiterator('agency'): a = Agency(agency_tag = agency.get('tag'), title = agency.get('title'), region = agency.get('regionTitle')) a.save()
def __init__(self):
    """Load KML/KMZ line data.

    Reads self.kml_file (set by get_config), extracting every named line
    whose coordinates fall within the configured map polygon into
    self.lines.
    """
    self.get_config()
    self.lines = []
    if not os.path.exists(self.kml_file):
        return
    style = {}
    styl = ''
    zipped = False
    if self.kml_file[-4:] == '.kmz':  # zipped file?
        zipped = True
        # Fix: was zipfile.ZipFile(kml_file, ...) -- an unbound name; the
        # path lives on self.
        zf = zipfile.ZipFile(self.kml_file, 'r')
        inner_file = ''
        for name in zf.namelist():
            if name[-4:] == '.kml':
                inner_file = name
                break
        if inner_file == '':
            return
        memory_file = StringIO.StringIO()
        memory_file.write(zf.open(inner_file).read())
        root = ElementTree(fromstring(memory_file.getvalue()))
    else:
        kml_data = open(self.kml_file, 'rb')
        root = ElementTree(fromstring(kml_data.read()))
    # Walk every element; tags are namespace-qualified, so strip the
    # '{...}' prefix before comparing.
    iterat = root.getiterator()
    for element in iterat:
        elem = element.tag[element.tag.find('}') + 1:]
        if elem == 'Style':
            for name, value in element.items():
                if name == 'id':
                    styl = value
        elif elem == 'color':
            style[styl] = self.colour
        elif elem == 'name':
            line_name = element.text
        elif elem == 'styleUrl':
            styl = element.text[1:]
        elif elem == 'coordinates':
            coords = []
            coordinates = ' '.join(element.text.split()).split(' ')
            for i in range(len(coordinates)):
                coords.append([round(float(coordinates[i].split(',')[1]), 6),
                               round(float(coordinates[i].split(',')[0]), 6)])
            # Keep the line only if its first and middle points are on the map.
            i = int(len(coords) / 2)
            if within_map(coords[0][0], coords[0][1], self.map_polygon) and \
               within_map(coords[i][0], coords[i][1], self.map_polygon):
                try:
                    self.lines.append(Line(line_name, style[styl], coords))
                except:
                    # The style may be missing; fall back to the default
                    # colour.  NOTE(review): bare except kept from original.
                    self.lines.append(Line(line_name, self.colour, coords))
    if zipped:
        memory_file.close()
        zf.close()
    else:
        kml_data.close()
def parse_routes(): for agency in Agency.objects.all(): try: if agency.region == 'California-Northern': routeList=ET(file=urllib2.urlopen("http://webservices.nextbus.com/service/publicXMLFeed?command=routeList&a={0}".format(agency.agency_tag))) for route in routeList.getiterator('route'): r = Route(tag = route.get('tag'), title = route.get('title'), short_title = route.get('shortTitle'), agency=agency) r.save() except urllib2.URLError: print "unable to connect to url for agency {0}".format(agency.agency_tag)
def __init__(self, xml_file):
    """Load the five known sections of the config XML, exposing each as a
    Struct keyed by its tag with one attribute per (stripped) element text."""
    self.max_len = 0
    from xml.etree.ElementTree import ElementTree
    tree = ElementTree()
    tree.parse(xml_file)
    sections = [tree.getiterator(tag)
                for tag in ('Header', 'Contents', 'Dependancy', 'Context', 'Actions')]
    for section in sections:
        tag = section[0].tag
        self[tag] = Struct()
        for element in section:
            name = element.text.strip()
            setattr(self[tag], name, None)
            # Track the longest attribute name seen.
            self.max_len = max(self.max_len, len(name))
def cleanFile(filename, elementsToBeRemove):
    """Remove the elements matched by each path in elementsToBeRemove from
    the XML file and write the result back in place.

    elementsToBeRemove -- iterable of ElementPath expressions (relative to
    the root) identifying the elements to drop.
    """
    tree = ElementTree()
    tree.parse(filename)
    # ElementTree has no parent pointers, so build a child -> parent map.
    # Fix: tree.iter() replaces getiterator(), removed in Python 3.9; the
    # unused root variable is gone too.
    parent_map = dict((c, p) for p in tree.iter() for c in p)
    for element in elementsToBeRemove:
        elementNode = tree.find(element)
        if (elementNode is not None):
            parent_map[elementNode].remove(elementNode)
    tree.write(filename)
class SubjectsParser(object):
    """Subjects file parser."""

    def __init__(self, path):
        self.tree = ElementTree()
        self.tree.parse(path)

    def __iter__(self):
        """Yield a Subject for every <subject> element, built from its
        <long> and <short> child texts."""
        # Fix: iter() replaces getiterator() (removed in Python 3.9), and
        # the locals no longer shadow the Python 2 builtin "long".
        for subject in self.tree.iter('subject'):
            long_name = subject.find('long').text
            short_name = subject.find('short').text
            yield Subject(long_name, short_name)
def cleanFile(filename):
    """Strip the globally-configured elementsToRemove paths from the XML
    file at filename, rewriting it in place."""
    tree = ElementTree()
    tree.parse(filename)
    root = tree.getroot()
    # ElementTree lacks parent pointers: index every child by its parent.
    parent_map = {}
    for parent in tree.getiterator():
        for child in parent:
            parent_map[child] = parent
    for path in elementsToRemove:
        target = tree.find(path)
        if target is not None:
            parent_map[target].remove(target)
    tree.write(filename)
def normalizeXMLData(data):
    """Parse data as XML, blank out every node whose tag appears in the
    module-level filters list, and return the re-serialized document.

    Raises ValueError when data is not well-formed XML.
    """
    # Read in XML
    try:
        tree = ElementTree(file=StringIO.StringIO(data))
    except Exception:
        raise ValueError("Could not parse XML data")
    # Apply filters.  Fix: renamed the loop variable, which used to shadow
    # the builtin filter().
    for filtered_tag in filters:
        for node in tree.getiterator(filtered_tag):
            node.clear()
    return tostring(tree.getroot())
def get_mirrors(self, mirror_template=None): """ Provide a set of mirrors where you can get the distribution from """ # the main server is stored in the template self.main_server = self.source_template.base_uri # other used servers for medium in self.used_media: if not medium.startswith("cdrom:"): # seems to be a network source self.used_servers.append(medium) if len(self.main_sources) == 0: self.default_server = self.main_server else: self.default_server = self.main_sources[0].uri # get a list of country codes and real names self.countries = {} fname = "/usr/share/xml/iso-codes/iso_3166.xml" if os.path.exists(fname): et = ElementTree(file=fname) it = et.getiterator('iso_3166_entry') for elm in it: try: descr = elm.attrib["common_name"] except KeyError: descr = elm.attrib["name"] try: code = elm.attrib["alpha_2_code"] except KeyError: code = elm.attrib["alpha_3_code"] self.countries[code.lower()] = gettext.dgettext('iso_3166', descr) # try to guess the nearest mirror from the locale self.country = None self.country_code = None locale = os.getenv("LANG", default="en_UK") a = locale.find("_") z = locale.find(".") if z == -1: z = len(locale) country_code = locale[a+1:z].lower() if mirror_template: self.nearest_server = mirror_template % country_code if country_code in self.countries: self.country = self.countries[country_code] self.country_code = country_code
def get_mirrors(self, mirror_template=None):
    """
    Provide a set of mirrors where you can get the distribution from
    """
    # The main server comes straight from the sources template.
    self.main_server = self.source_template.base_uri
    # Every non-cdrom medium counts as a network server already in use.
    for medium in self.used_media:
        if not medium.startswith("cdrom:"):
            self.used_servers.append(medium)
    if self.main_sources:
        self.default_server = self.main_sources[0].uri
    else:
        self.default_server = self.main_server
    # Map lower-cased ISO 3166 codes to localized country names.
    self.countries = {}
    fname = "/usr/share/xml/iso-codes/iso_3166.xml"
    if os.path.exists(fname):
        et = ElementTree(file=fname)
        for elm in et.getiterator('iso_3166_entry'):
            attribs = elm.attrib
            # Prefer the colloquial name and the two-letter code.
            descr = attribs["common_name"] if "common_name" in attribs else attribs["name"]
            code = attribs["alpha_2_code"] if "alpha_2_code" in attribs else attribs["alpha_3_code"]
            self.countries[code.lower()] = gettext.dgettext(
                'iso_3166', descr)
    # Guess the nearest mirror from $LANG (e.g. "en_GB.UTF-8" -> "gb").
    self.country = None
    self.country_code = None
    locale = os.getenv("LANG", default="en_UK")
    underscore = locale.find("_")
    dot = locale.find(".")
    if dot == -1:
        dot = len(locale)
    country_code = locale[underscore + 1:dot].lower()
    if mirror_template:
        self.nearest_server = mirror_template % country_code
    if country_code in self.countries:
        self.country = self.countries[country_code]
        self.country_code = country_code
def get_story_output(self, story_name):
    """Return the parsed feature for story_name from the newest output
    file that contains it, or None when no output file mentions it."""
    # Newest files first, so the most recent run wins.
    candidates = sorted(glob.glob(self.output_directory + "/*.output"),
                        key=lambda f: os.stat(f)[stat.ST_MTIME],
                        reverse=True)
    # now find the first file that contains output for the story_name
    tree = ElementTree()
    wanted = story_name + ".feature"
    for candidate in candidates:
        tree.parse(candidate)
        for feature in tree.getiterator("feature"):
            if feature.attrib['name'] == wanted:
                return parse_feature_from_xml(feature)
    return None
def __init__(self, xml_files, imageDir, imageURL):
    """Index Artwork objects by name from every element with an <icon>
    child found in the given XML files."""
    self.artworks = {}
    self.imageDir = imageDir
    self.imageURL = imageURL
    for xml_file in xml_files:
        xml = ElementTree(file=xml_file)
        for elt in xml.getiterator():
            if elt.find('icon') is not None:
                art = Artwork(elt)
                if not art.name:
                    # Fix: the warning said "skipping" but the nameless
                    # artwork was stored anyway (under an empty key) --
                    # now really skip it.  Also fixed the "Skiping" typo.
                    logging.warn('Skipping icon with no name %s' % art.icon)
                    continue
                self.artworks[art.name] = art
def parse_top100(filename):
    """
    Parse an XML file with Top 100 data in it.

    Returns a list of (rank, player name, level) tuples, ranked in
    document order starting at 1.
    """
    tree = ElementTree()
    tree.parse(filename)
    rankings = []
    # Fix: iter() replaces getiterator() (removed in Python 3.9) and
    # enumerate replaces the xrange(len(...)) indexing idiom.
    for rank, char in enumerate(tree.iter("char"), start=1):
        player = char.find("name").text
        level = char.find("level").text
        rankings.append((rank, player, int(level)))
    return rankings
def simplify_xml(in_file):
    """Simplifies BLAST xml files to reduce size.

    Keeps only the first Hit of every Iteration (clearing the rest) and
    clears Iterations with no hits, rewriting in_file in place.  Returns
    None; errors are swallowed so a bad file is simply left unmodified.
    """
    try:
        tree = ElementTree(file = in_file)
        # Fix: list(...iter(...)) replaces getiterator(), removed in
        # Python 3.9; materialize lists since elements are mutated.
        for it in list(tree.iter('Iteration')):
            hit_list = list(it.iter('Hit'))
            if len(hit_list) > 1:
                for hit in hit_list[1:]:
                    hit.clear()
            elif len(hit_list) == 0:
                it.clear()
        tree.write(in_file)
    except Exception:
        # Fix: narrowed from a bare except, which also swallowed
        # SystemExit / KeyboardInterrupt; still deliberately best-effort.
        return None
def all_insts(mapfile):
    """Return the file addresses (as ints, parsed base-16) of every <ins>
    element in the map file, in document order."""
    tree = ElementTree()
    tree.parse(mapfile)
    # ElementTree 1.3 renamed getiterator() to iter(); support both.
    if xml.etree.ElementTree.VERSION.startswith("1.3."):
        ins_elems = tree.iter("ins")
    else:
        ins_elems = tree.getiterator("ins")
    return [int(ins.attrib['address'], base=16) for ins in ins_elems]
def main(args):
    """Scan an SRA run XML file (args[0]) and print, for each RUN that
    produced a full triple, its accession, experiment accession and the
    first word of any "[tagged..." VALUE."""
    run_xml = ElementTree()
    run_xml.parse(args[0])
    run_attrib = []
    for elem in run_xml.getiterator():
        # change this when using Python 2.7
        if elem.tag == "RUN":
            # Flush the previous run's triple before starting a new one.
            if len(run_attrib) == 3:
                for ra in run_attrib:
                    print ra,
                print
            run_attrib = []
            run_attrib.append(elem.attrib["accession"])
        if elem.tag == "EXPERIMENT_REF":
            run_attrib.append(elem.attrib["accession"])
        if elem.tag == "VALUE":
            if "[tagged" in elem.text:
                run_attrib.append(elem.text.split(" ")[0])
    # NOTE(review): the final run's triple is never printed (no flush after
    # the loop) -- confirm whether that is intentional.
def main():
    """Print a CD label followed by one "n. artist - title (**bpm**)" line
    per audio track, computing the BPM by piping sox output into bpm."""
    tree = ElementTree()

    # Make sure valid filepath specified
    input_file = sys.argv[1]
    if not os.path.isfile(input_file):
        sys.exit("Invalid filepath specified")

    # Otherwise parse XML
    tree.parse(input_file)

    # Print CD label
    label = unquote(tree.find('label').text)
    print("%s\n%s" % (label, "=" * len(label)))

    # Fix: iter() replaces getiterator(), which was removed in Python 3.9
    # and broke this Python 3 script on modern interpreters.
    songs = tree.iter("audio")

    # Iterate through songs and print info
    for i, song in enumerate(songs):
        # BPM: decode with sox, pipe the raw mono float samples into bpm
        filepath = unquote(
            unquote(song.find("uri").text).replace('file://', ''))
        ps = subprocess.Popen([
            "sox", filepath, "-t", "raw", "-r", "44100", "-e", "float",
            "-c", "1", "-"
        ],
                              stdout=subprocess.PIPE,
                              stderr=subprocess.DEVNULL)
        output = subprocess.check_output(('bpm'), stdin=ps.stdout)
        # Fix: close our copy of the pipe and reap sox to avoid zombie
        # processes piling up across tracks.
        ps.stdout.close()
        ps.wait()
        bpm = output.decode().strip()

        # Title and artist
        artist = unquote(song.findtext("artist", ""))
        title = unquote(song.findtext("title", ""))
        if len(artist) > 0:
            track_info = "%s - %s" % (artist, title)
        else:
            track_info = title

        # Output song info
        print("%d. %s (**%s**)" % (i + 1, track_info, bpm))
def parseTrkpnts(gpx):
    """Parse a GPX 1.0 file and return its track points as a list of
    {'ele', 'time', 'lat', 'lon'} dicts (ele/lat/lon as floats).

    NOTE(review): a <trkpt> missing <ele> or <time> silently reuses the
    previous point's value (and raises NameError on the first point).
    """
    namespace = '{http://www.topografix.com/GPX/1/0}'
    root = ElementTree(file=gpx)
    trkpnts = []
    # Fix: iter() replaces getiterator() and iterating the element directly
    # replaces getchildren(); both old forms were removed in Python 3.9.
    for element in root.iter():
        if element.tag == namespace + 'trkpt' and element.keys():
            for name, value in element.items():
                if name == 'lat':
                    lat = value
                if name == 'lon':
                    lon = value
            for child in element:
                if child.tag == namespace + 'ele':
                    ele = child.text
                if child.tag == namespace + 'time':
                    time = child.text
            trkpnts.append( {'ele': float(ele), 'time':time, 'lat':float(lat), 'lon':float(lon)} )
    return trkpnts
class Metadata(object):
    """Reads an experiment-configuration XML file and collects every
    parameter element into ParameterConf objects."""

    def __init__(self, filename):
        # parmConfObjList: one ParameterConf per parameter element found.
        self.parmConfObjList = []
        self.readXml(filename)

    def readXml(self, filename):
        """Parse filename and populate self.parmConfObjList."""
        self.projectElement = None
        self.procUnitConfObjDict = {}
        # Note: ElementTree().parse() returns the ROOT ELEMENT, not the tree.
        self.projectElement = ElementTree().parse(filename)
        self.project = self.projectElement.tag
        # Find every element whose tag matches ParameterConf's element name.
        parmElementList = self.projectElement.getiterator(ParameterConf().getElementName())
        for parmElement in parmElementList:
            parmConfObj = ParameterConf()
            parmConfObj.readXml(parmElement)
            self.parmConfObjList.append(parmConfObj)
def get_weather(param):
    '''
    Get weather details for your city. Other location can be checked too:
    #weather [location] (e.g. #weather Berlin)
    '''
    location = WEATHER_LOCATION
    if param != '':
        if not re.match('^[\w\d ]+$', param):
            return 'Invalid location. Use city name or postal code.'
        location = param
    try:
        location = quote(location.encode('utf8', 'replace'))
        handle = urlopen('http://www.google.com/ig/api?weather=%s' % location)
    except HTTPError as error:
        return u'%s' % error
    tree = ElementTree()
    tree.parse(handle)
    handle.close()
    city = tree.find('weather/forecast_information/city')
    if city is None:  # identity check instead of "== None"
        return 'Did not find city.'
    city = city.attrib['data']
    condition = tree.find('weather/current_conditions/condition').attrib['data']
    curtemp = tree.find('weather/current_conditions/temp_c').attrib['data']
    weather = u'Currently: %s at %s°C in %s\n' % (condition, curtemp, city)
    for condition in tree.getiterator('forecast_conditions'):
        # Fix: forecast temps arrive as strings; the old code converted them
        # to numbers only in the celsius branch, so round(minTemp) below
        # raised TypeError whenever WEATHER_IN_CELSIUS was off.
        minTemp = float(condition.find('low').attrib['data'])
        maxTemp = float(condition.find('high').attrib['data'])
        day = condition.find('day_of_week').attrib['data']
        cond = condition.find('condition').attrib['data']
        if cond == '':
            cond = u'Unknown condition'
        unit = u'F'
        if WEATHER_IN_CELSIUS:
            # convert fahrenheit to celsius
            minTemp = (minTemp - 32.0) * (5.0/9)
            maxTemp = (maxTemp - 32.0) * (5.0/9)
            unit = u'°C'
        weather += u'%s: %s at %d-%d%s\n' % (
            day, cond, round(minTemp), round(maxTemp), unit)
    # Drop the trailing newline of the last forecast line.
    return weather[:-1]
class RegisterView:
    """Loads register definitions from an XML file and provides lookup,
    bit-field extraction and pretty-printing helpers."""

    def load_definitions(self, defs_file):
        """Parse `defs_file` (path; ~ is expanded) and cache its <register>
        elements, each tagged with a computed 'fullname' attribute."""
        self.tree = ElementTree()
        self.tree.parse(path.expanduser(defs_file))
        reggroups = self.tree.findall(".//registergroup")
        for rg in reggroups:
            # Create a full name for the register based on the register
            # group if possible. Some registers don't use the base/group
            # name, so fall back to the register name.
            for r in rg.findall('./register'):
                try:
                    fullname = rg.attrib['name'] + '_' + r.attrib['name'].split('_', 1)[1]
                except (KeyError, IndexError):  # was a bare except
                    fullname = r.attrib['name']
                r.set('fullname', fullname)
        # Materialize as a list: reg_defs is iterated several times below,
        # and getiterator() was deprecated (removed in Python 3.9).
        self.reg_defs = list(self.tree.iter('register'))
        print("Loaded register definitions: %s" % path.expanduser(defs_file))

    def find_registers(self, reg_name):
        """Return the fullnames of all registers whose fullname starts
        with `reg_name`."""
        return [r.attrib['fullname'] for r in self.reg_defs
                if r.attrib['fullname'].startswith(reg_name)]

    def get_reg_element(self, reg_name):
        """Return the <register> element whose fullname matches exactly,
        or None when there is no such register."""
        for r in self.reg_defs:
            if r.attrib['fullname'] == reg_name:
                return r
        return None

    def extract_bits(self, val, bit_len, bit_offset):
        """Extract `bit_len` bits of `val` starting at `bit_offset`."""
        return (val >> bit_offset) & ((1 << bit_len) - 1)

    def get_reg_address(self, name):
        # Addresses are stored as Python expressions (e.g. "0x1000") in the
        # trusted definitions file; eval keeps the original semantics.
        return eval(self.get_reg_element(name).attrib['address'])

    def print_reg(self, name, val):
        """Print `val` decoded field by field for register `name`."""
        print("%s (*0x%08X) = 0x%08X\n" % (name, self.get_reg_address(name), val))
        reg = self.get_reg_element(name)
        for field in reg:  # getchildren() is deprecated
            bit_len = int(field.attrib['bitlength'])
            bit_offset = int(field.attrib['bitoffset'])
            bit_name = field.attrib['name']
            description = field.attrib.get('description', 'no description')
            print("%s\t0x%X\t\t%s" % (
                bit_name, self.extract_bits(val, bit_len, bit_offset), description))
def _set_led(self, val):
    # Replace the LED <signal> element at self.signal_num in the global
    # policy with a freshly parsed template, then refresh every preview.
    # `val` selects which template is used: 3 -> glob_signal_ALL,
    # anything else -> glob_signal_RBO.
    global glob_led_policy, glob_led_signal_list
    # Only act when the selected preview is in the 'not_checked' state.
    if not self.preview_list[val].display_type == 'not_checked':
        return
    new_signal = ElementTree()
    parser = XMLTreeBuilder()
    if val == 3:
        parser.feed(glob_signal_ALL)
    else:
        parser.feed(glob_signal_RBO)
    # NOTE(review): assigns ElementTree's private _root directly instead of
    # going through a public API — fragile across ElementTree versions.
    new_signal._root = parser.close()
    new_signal_list = new_signal.getiterator("signal")
    # Preserve the attributes and tail text of the signal being replaced so
    # the document's surrounding formatting stays intact.
    new_signal_list[0].attrib = glob_led_signal_list[
        self.signal_num].attrib
    new_signal_list[0].tail = glob_led_signal_list[self.signal_num].tail
    # Swap the old signal element for the new one at the same position.
    glob_led_policy.remove(glob_led_signal_list[self.signal_num])
    glob_led_policy.insert(self.signal_num, new_signal_list[0])
    glob_led_signal_list[self.signal_num] = new_signal_list[0]
    # Re-derive the display state of every preview and redraw it.
    for i in range(len(self.preview_list)):
        self.preview_list[i].display_type = self._get_led(i)
        self.preview_list[i].reset_view()
def list(self):
    """Fetch all zones for the account.

    Returns a dict mapping domain name -> Zone.
    Raises ParseError when the reply lacks the expected query-count
    header or a zone entry is missing its domain/id.
    """
    url = Zerigo._url_api + Zerigo._url_zones
    Zerigo._logger.debug('retrieving ' + url)
    reply = self._conn.get(url)
    try:
        Zerigo._logger.debug(reply.headers['x-query-count'] +
                             ' zone(s) for account: ' + Zerigo.user)
    except KeyError:
        raise ParseError()
    tree = ElementTree()
    tree.parse(reply.body_file)
    zones_by_name = {}  # renamed: the local `list` shadowed the builtin
    # iter() replaces the deprecated getiterator() (removed in Py 3.9).
    for it in tree.iter('zone'):
        name = it.find('domain')
        zone_id = it.find('id')  # renamed: `id` shadowed the builtin
        if zone_id is None or name is None \
           or zone_id.text is None or name.text is None:
            raise ParseError()
        zones_by_name[name.text] = Zone(name.text, zone_id.text)
    return zones_by_name
def onRefreshClick(self, widget):
    """Populate the game liststore with the user's Steam library,
    flagging games already installed locally (appmanifest_<id>.acf
    files under SteamApps)."""
    if not self.steamid.get_text():
        return
    files = [f for f in listdir(SteamApps) if isfile(join(SteamApps, f))]
    appids = []
    for file_name in files:  # renamed: `file` shadowed the builtin
        # Escape the dot so '.acf' matches literally.
        m = re.search(r"appmanifest_([0-9]+)\.acf", file_name)
        if m:
            # group(1) is the captured appid; groups(1)[0] only worked
            # by accident (groups takes a *default*, not an index).
            appids.append(int(m.group(1)))
    url = ("http://steamcommunity.com/id/" + self.steamid.get_text()
           + "/games?tab=all&xml=1")
    html = urlopen(url)
    tree = ElementTree()
    tree.parse(html)
    # iter() replaces the deprecated getiterator() (removed in Py 3.9).
    for game in tree.iter('game'):
        appid = int(game.find('appID').text)
        name = game.find('name').text
        exists = appid in appids
        self.game_liststore.append([exists, appid, name])
def parseTrkpnts(gpx):
    """Parse a GPX 1.0 document and return its trackpoints.

    gpx: a path or file object accepted by ElementTree(file=...).
    Returns a list of dicts with keys 'ele' (float), 'time' (str),
    'lat' (float) and 'lon' (float), in document order.
    """
    ns = '{http://www.topografix.com/GPX/1/0}'
    tree = ElementTree(file=gpx)
    points = []
    # iter(tag) replaces the deprecated getiterator() (removed in Py 3.9).
    for trkpt in tree.iter(ns + 'trkpt'):
        if not trkpt.keys():
            # Original behavior: ignore trackpoints without attributes.
            continue
        # findtext avoids the deprecated getchildren() walk and, unlike
        # the old loop-carried locals, cannot leak values between points.
        ele = trkpt.findtext(ns + 'ele')
        time = trkpt.findtext(ns + 'time')
        points.append({
            'ele': float(ele),
            'time': time,
            'lat': float(trkpt.get('lat')),
            'lon': float(trkpt.get('lon')),
        })
    return points
def list(self):
    """Fetch all hosts of this zone.

    Returns a dict mapping hostname ('@' for the zone apex, Bind
    notation) -> Host.
    Raises NotFound when the zone has no id yet, ParseError when the
    reply is missing the query-count header or host fields.
    """
    # This list (but up to 300 hosts) is also returned when we get
    # <_url_list>
    if self._id is None:
        raise NotFound(self.name)
    url = Zerigo._url_api + Zone._url_hosts.substitute(zone_id=self._id)
    Zerigo._logger.debug('retrieving ' + url)
    reply = self._conn.get(url)
    try:
        Zerigo._logger.debug(reply.headers['x-query-count'] +
                             ' host(s) for zone: ' + self.name)
    except KeyError:  # was a bare except
        raise ParseError()
    tree = ElementTree()
    tree.parse(reply.body_file)
    hosts_by_name = {}  # renamed: the local `list` shadowed the builtin
    # iter() replaces the deprecated getiterator() (removed in Py 3.9).
    for it in tree.iter('host'):
        host_id = it.find('id')  # renamed: `id` shadowed the builtin
        hostname = it.find('hostname')
        host_type = it.find('host-type')  # renamed: `type` shadowed the builtin
        data = it.find('data')
        if host_id is None or host_type is None or data is None:
            raise ParseError()
        if hostname is None or hostname.attrib.get('nil') == 'true':
            hostname = '@'  # Bind notation
        else:
            hostname = hostname.text
        host = Host(hostname, self.name, host_id.text, self)
        host.type = host_type.text
        host.data = data.text
        hosts_by_name[hostname] = host
    return hosts_by_name
def refresh_appids(self):
    """ Fetch user library.

    Re-reads the local appmanifest files to mark installed games, then
    pulls the public games list for the selected Steam ID and fills the
    liststore. Surfaces Steam's <error> text (e.g. private profile) in
    the infobar.
    """
    self.game_liststore.clear()
    if not self.steamid.get_active_text():
        return
    files = [
        f for f in os.listdir(self.steam_apps_path)
        if os.path.isfile(os.path.join(self.steam_apps_path, f))
    ]
    appids = []
    for file_name in files:
        # Escape the dot so '.acf' matches literally.
        match = re.search(r"appmanifest_([0-9]+)\.acf", file_name)
        if match:
            # group(1) is the captured appid; groups(1)[0] only worked
            # by accident (groups takes a *default*, not an index).
            appids.append(int(match.group(1)))
    url = ("https://steamcommunity.com/id/"
           + self.steamid.get_active_text() + "/games?tab=all&xml=1")
    html = urlopen(url)
    tree = ElementTree()
    tree.parse(html)
    # iter() replaces the deprecated getiterator() (removed in Py 3.9).
    for game in tree.iter('game'):
        appid = int(game.find('appID').text)
        name = game.find('name').text
        exists = appid in appids
        self.game_liststore.append([exists, appid, name])
    error = tree.find('error')
    if getattr(error, 'text', False):
        self._infobar_message(error.text)
    else:
        self.infobar.hide()
def get_word_xml(self, word):
    """Fetch the XML definition of `word` from dict.cn.

    Returns the result of __xml_parse on the list of <dict> elements,
    or None when the word is unknown or the reply cannot be decoded.
    """
    # XML flavor of the service. A mini HTML version also exists:
    # http://dict.cn/mini.php?q=<word>
    word_page = "http://dict.cn/ws.php?q=" + word
    content = urllib2.urlopen(word_page).read()
    try:
        # The service replies in GBK; re-encode to UTF-8.
        content = unicode(content, 'gbk').encode("utf8")
    except UnicodeDecodeError:
        return None
    # Drop the XML declaration: it still claims GBK encoding, which would
    # make the parser choke on the re-encoded bytes.
    content = content.replace('<?xml version="1.0" encoding="GBK" ?>', '')
    tree = ElementTree()
    tree.parse(StringIO(content))
    # Materialize: iter() (getiterator() is deprecated and removed in
    # Python 3.9) returns an iterator, and we index below.
    entries = list(tree.iter("dict"))
    entry = entries[0]
    # The first child holds "Not Found" when the word is unknown.
    if list(entry)[0].text != "Not Found":
        # Strip the <audio> element; __xml_parse does not need it.
        audio = entry.find("audio")
        if audio is not None:
            entry.remove(audio)
        # NOTE: __xml_parse receives the list of <dict> elements — the
        # original code shadowed the `word` parameter with this list.
        return self.__xml_parse(entries)
    else:
        return None
def remap(self):
    """Rewrite the container XML config so that every <disk><source>
    whose device path belongs to a peer node is remapped to this
    node's device path."""
    path = self.devpaths[Env.nodename]
    # Device paths used on the other nodes. The local path must be
    # wrapped in a set literal: set(<str>) builds a set of *characters*
    # and never removed the local path from `paths`.
    paths = set(self.devpaths.values()) - {self.devpaths[Env.nodename]}
    from xml.etree.ElementTree import ElementTree
    tree = ElementTree()
    cf = self.svc.resources_by_id['container'].cf
    try:
        tree.parse(cf)
    except Exception:  # narrowed from a bare except; still logged and re-raised
        self.log.error("failed to parse %s" % cf)
        raise ex.Error
    # iter() replaces the deprecated getiterator() (removed in Py 3.9).
    for dev in tree.iter('disk'):
        s = dev.find('source')
        if s is None:
            continue
        il = s.items()
        # Only remap sources with exactly one attribute (the dev path).
        if len(il) != 1:
            continue
        attr, devp = il[0]
        if devp in paths:
            self.log.info("remapping device path: %s -> %s" % (devp, path))
            s.set('dev', path)
    tree.write(cf)
def install(language, directory=config.default_dict_path,
            repos=config.default_repository, use_description=True):
    '''
    Download and install a dictionary file.

    language: a string of the form 'll_CC'. Example: 'en_US' for English, USA
    directory: the installation directory. Defaults to the value given in
        config.py. After installation this is the package root of 'hyphen'
    repos: the url of the dictionary repository. (Default: as declared in
        config.py; after installation of PyHyphen this is LibreOffice's GIT
        repository.)
    use_description: if True, try to read the dictionaries.xcu metadata
        file from the repository before guessing the dictionary URL.
    '''
    # Download the dictionaries.xcu file from the LibreOffice repository
    # if needed.
    if use_description:
        # first try full language name; it won't work in all cases...
        language_ext_name = language
        descr_url = repos + language_ext_name + '/dictionaries.xcu'
        try:
            descr_file = urlopen(descr_url)
        except URLError:
            # OK. So try with the country code.
            language_ext_name = language[:2]
            descr_url = repos + language_ext_name + '/dictionaries.xcu'
            try:
                descr_file = urlopen(descr_url)
            except URLError:
                descr_file = None
    # Parse the xml file if it is present, and extract the data.
    if use_description and descr_file:
        descr_tree = ElementTree(file=descr_file)
        # Find the nodes containing meta data of hyphenation dictionaries.
        # iter() replaces the deprecated getiterator() (removed in Py 3.9).
        for node in descr_tree.iter('node'):
            # Check if node relates to a hyphenation dict. We assume this
            # is the case if an attribute value contains the substring
            # 'hyph'.
            node_values = [i[1] for i in node.items()]
            iter_values = (i for i in node_values if ('hyph' in i.lower()))
            for v in iter_values:
                # Found one! So extract the data and construct the local
                # record. ('prop' rather than 'property' so the builtin is
                # not shadowed; list(prop) replaces deprecated getchildren().)
                for prop in node:
                    prop_values = [j[1] for j in prop.items()]
                    for pv in prop_values:
                        if pv.lower() == 'locations':
                            # Its only child's text is a list of strings of
                            # the form %origin%<filename>. For simplicity, we
                            # only use the first filename in the list.
                            raw_dict_fn = list(prop)[0].text.split()[0]
                            dict_fn = raw_dict_fn[9:]  # strip the prefix '%origin%'
                            dict_url = ''.join((repos, language_ext_name, '/', dict_fn))
                            break  # skip any other values of this property
                        elif pv.lower() == 'locales':
                            # Its only child's text is a list of locales.
                            dict_locales = list(prop)[0].text.replace('-', '_').split()
                            break  # skip any other values of this property
                # Install the dictionary file.
                dict_str = urlopen(dict_url).read()
                filepath = directory + '/' + dict_fn
                with open(filepath, 'wb') as dict_file:
                    dict_file.write(dict_str)
                # Save the metadata: generate a record for each locale,
                # overwrite any existing ones.
                new_dict = DictInfo(dict_locales, filepath, url=dict_url)
                for l in dict_locales:
                    hyphen.dict_info[l] = new_dict
    # handle the case that there is no xml metadata
    else:
        # Download the dictionary guessing its URL.
        dict_fn = ''.join(('hyph_dict_', language, '.dic'))
        dict_url = ''.join((repos, dict_fn))
        dict_str = urlopen(dict_url).read()
        filepath = directory + '/' + dict_fn
        with open(filepath, 'w') as dict_file:
            dict_file.write(dict_str)
        # Store the metadata. The URL is thus set to None.
        new_dict = DictInfo([language], filepath)
        hyphen.dict_info[language] = new_dict
    # Save the modified metadata.
    save_dict_info()
def task_runTest(TestCase):
    # Run one test case: optionally push per-DUT voice CLI command sets over
    # telnet, then launch the configured TCL test script as a subprocess and
    # poll it to completion. Returns (status, errmsg).
    # `TestCase` is a list of DUT dicts with the execution-info dict appended
    # as the last element.
    TestExecutionInfo = TestCase[-1]
    TestCase = TestCase[:-1]
    # Construct the report directory suffix, eg. SMP96362GWV_SMP96368GWV
    TestExecutionInfo['DirSuffix'] = ''
    for DUT in TestCase:
        TestExecutionInfo['DirSuffix'] += '_' + DUT['boardID']
    if 'TCLSCRIPT' in TestExecutionInfo:
        print timestamp(), '\tRun TCL test script, voice cli files are used as pre-test board configuration'
        pbxml_fh = gen_phonebookXml(TestCase)
        # Determine if there is CLI file supplied.
        # If there is CLI files, run number of CLI sets times
        runOnce = False
        CLIFiles = []
        for DUT in TestCase:
            if 'CLIFile' in DUT:
                CLIFiles.append(DUT['CLIFile'])
            else:
                # At least one DUT has no CLI file: fall back to one run.
                runOnce = True
        # Construct CLI command lists from CLI xml files
        BoardCLIList = []
        for clifile in CLIFiles:
            root = ElementTree(file=clifile)
            cmd_iter = root.getiterator()
            cmdSets = []
            for cmdSet in cmd_iter:
                if cmdSet.getchildren():
                    for child in cmdSet:
                        cmd = {}
                        # print child.get('Cmdname')
                        cmd['Cmdname'] = child.get('Cmdname')
                        cmd['Cmd'] = child.text
                        cmdSets.append(cmd)
            BoardCLIList.append(cmdSets)
        if runOnce:
            total_numRuns = 1
        else:
            total_numRuns = len(BoardCLIList[0]) # number of CLI sets
        for numRuns in range(total_numRuns):
            time.sleep(10)
            # Send voice cli commands to the DUTs
            CurrSuffix = ''
            if (len(BoardCLIList) > 0):
                DutIx = 0
                # Send the CLI command sets to the DUTs
                for DUT in TestCase:
                    print timestamp(), '\tDUT#'+ str(DutIx+1)+ '@'+ DUT['IP'], 'Running command set:',\
                        BoardCLIList[DutIx][numRuns]['Cmdname']
                    telconn = tel_connect(DUT['IP'])
                    if not telconn:
                        print timestamp(), 'Failed to send voice CLI commands', DUT['boardID'] + '(' + DUT['IP'] + ')'
                        return TASK_STATUS_FAIL, 'Telnet connection failed'
                    # Sends the numRuns command set
                    cmdlist = BoardCLIList[DutIx][numRuns]['Cmd'].split('\n')
                    for cmd in cmdlist:
                        print timestamp()+'\t'+ DUT['IP']+'>', cmd
                        telconn.write(cmd+'\n')
                    # Give the board time to apply the commands.
                    time.sleep(10)
                    CurrSuffix += '_' + BoardCLIList[DutIx][numRuns]['Cmdname'] # for the current result directory
                    DutIx += 1
            # Run tcl script as subprocess and poll its status
            Dirname = TestExecutionInfo['TestName'] + '_' + strftime('%Y%m%d-%H-%M') + TestExecutionInfo['DirSuffix']+CurrSuffix
            print Dirname
            print timestamp(), '\tCreating tcl script process'
            if os.name == 'nt': # windows
                tclexe = 'C:\\Tcl\\bin\\tclsh.exe '
            else: # unix-likes are 'posix'
                tclexe = './'
            print tclexe
            print TestExecutionInfo['TCLSCRIPT']
            tclProcess = subprocess.Popen([tclexe+TestExecutionInfo['TCLSCRIPT'],\
                TestExecutionInfo['AbacusEnv'],\
                TestExecutionInfo['Duration_HR'],\
                TestExecutionInfo['Duration_MIN'],\
                TestExecutionInfo['ReportTemplate'],\
                PHONEBOOK_FILENAME,\
                Dirname], shell=True)
            status = TASK_STATUS_SUCCESS
            errmsg = ''
            if tclProcess:
                # Poll the process status
                # Exit codes: 0 = clean, 1 = script failure, 2 = Abacus errors.
                while status == TASK_STATUS_SUCCESS:
                    returncode = tclProcess.poll()
                    if returncode == 0:
                        status = TASK_STATUS_SUCCESS
                        errmsg = 'Tcl script done execution without error.'
                        break;
                    elif returncode == 1:
                        status = TASK_STATUS_FAIL
                        errmsg = 'Tcl script failed and terminated.'
                    elif returncode == 2:
                        status = TASK_STATUS_FAIL
                        errmsg = 'Abacus test eneded with errors, please check report'
                    time.sleep(15)
            # Remember the test report directories for notification
            global ReportDirList
            ReportDirList.append(Dirname)
        return status, errmsg
    else:
        print timestamp(), 'Run voice cli only (NOT IMPLEMENTED)'
        return TASK_STATUS_SUCCESS, ''
class XmlASTVisitor(compiler.visitor.ASTVisitor):
    """
    XmlASTVisitor is a simple visitor that will build an xml hierarchy
    representing the code being parsed.  This is intended to be the basic
    IPC so that we might add python code completion to MonoDevelop.
    """

    def __init__(self):
        """
        Initializes the visitor and sets the stream to be used for
        outputing xml data.
        """
        compiler.visitor.ASTVisitor.__init__(self)
        self.tree = ElementTree(element=Element('module'))
        self.root = self.tree.getroot()

    def append(self, parent, child):
        """Append `child` to `parent`; top-level nodes go under the
        module root."""
        if parent is None:
            parent = self.root
        parent.append(child)

    def walkChildren(self, node, parent=None):
        """Dispatch every AST child of `node`, tagging each with its AST
        parent so attribute assignments can climb the hierarchy later."""
        for child in node.getChildNodes():
            child.parent = node
            self.dispatch(child, parent)

    def default(self, node, parent=None):
        # AST nodes without a dedicated visitor just recurse.
        self.walkChildren(node, parent)

    def _haschildattr(self, element, key, value):
        """Return True if any descendant of `element` has attribute
        `key` == `value`."""
        # iter() replaces the deprecated getiterator() (removed in Py 3.9).
        for child in element.iter():
            if child.get(key, None) == value:
                return True
        return False

    def visitAssAttr(self, node, parent=None):
        """Record `self.<attr> = ...` as an <attribute> on the enclosing
        class element."""
        if hasattr(node.expr, 'name'):
            if node.expr.name == 'self' and parent:
                # walk up until we reach the parent class
                while parent:
                    # add attribute child if one does not exist for attrname
                    if parent.tag == 'class' \
                       and not self._haschildattr(parent, 'name', node.attrname):
                        element = Element('attribute')
                        element.set('name', node.attrname)
                        element.set('line', str(node.lineno - 1))  # convert to md base
                        self.append(parent, element)
                        break
                    parent = self.getParent(parent)

    def visitAssName(self, node, parent=None):
        element = Element('attribute')
        element.set('name', node.name)
        element.set('line', str(node.lineno - 1))
        self.append(parent, element)

    def visitClass(self, node, parent=None):
        """Emit a <class> element with name, line span, docs and members."""
        # build the class element
        element = Element('class')
        element.set('name', node.name)
        element.set('line', str(node.lineno))

        # get the end of the class: deepest child line number
        def walk(n, e):
            for c in n.getChildNodes():
                if c.lineno > e:
                    e = c.lineno
                e = walk(c, e)
            return e
        endline = walk(node, node.lineno)
        element.set('endline', str(endline))
        # add class docs
        if node.doc:
            docElement = Element('doc')
            docElement.text = node.doc
            element.append(docElement)
        # add ourselves to the hierarchy
        self.append(parent, element)
        # walk our children, now we are the parent
        self.walkChildren(node, element)

    def visitFrom(self, node, parent=None):
        """Emit one <import> element per name in a `from x import y` node."""
        for name in node.names:
            element = Element('import')
            element.set('line', str(node.lineno))
            element.set('module', node.modname + '.' + name[0])
            element.set('name', name[1] or name[0])
            self.append(parent, element)

    def visitFunction(self, node, parent=None):
        """Emit a <function> element with arguments, line span and docs."""
        element = Element('function')
        element.set('name', node.name)
        element.set('line', str(node.lineno))

        # get the end of the function: deepest child line number
        def walk(n, e):
            for c in n.getChildNodes():
                if c.lineno > e:
                    e = c.lineno
                e = walk(c, e)
            return e
        endline = walk(node, node.lineno)
        element.set('endline', str(endline))
        # add our function arguments
        # (enumerate replaces the old zip(range(len(...)), ...) idiom)
        for pos, name in enumerate(node.argnames):
            argElement = Element('argument')
            argElement.set('pos', str(pos))
            argElement.set('name', name)
            element.append(argElement)
        # mark *args/**kwargs on the trailing argument element(s)
        if node.kwargs and node.varargs:
            element[-1].set('name', '**' + element[-1].get('name'))
            element[-2].set('name', '*' + element[-2].get('name'))
        elif node.varargs:
            element[-1].set('name', '*' + element[-1].get('name'))
        elif node.kwargs:
            element[-1].set('name', '**' + element[-1].get('name'))
        # add function docs
        if node.doc:
            docElement = Element('doc')
            docElement.text = node.doc
            element.append(docElement)
        # add ourselves to the hierarchy
        self.append(parent, element)
        # walk our children, now we are the parent
        self.walkChildren(node, element)

    def visitImport(self, node, parent=None):
        """Emit one <import> element per name in an `import x` node."""
        for name in node.names:
            element = Element('import')
            element.set('line', str(node.lineno))
            element.set('name', name[1] or name[0])
            element.set('module', name[0])
            self.append(parent, element)

    def getParent(self, element):
        """Return the XML parent of `element`, or None for the root.
        O(n) scan of the tree; fine for the small trees produced here."""
        # iter() replaces the deprecated getiterator() (removed in Py 3.9).
        for parent in self.tree.iter():
            for child in parent:
                if child == element:
                    return parent
#print 'tree infilename =', tree_fname attr_infilename = "_".join([file_id, infile_type1]) print 'attributes infilename = ', attr_infilename log_file.write('attributes infilename = ' + attr_infilename + '\n') attr_fname = os.path.join(attr_dir_path, attr_infilename) #print 'attributes infilename = ', attr_fname GO_tree = ElementTree(file=tree_fname) # create dictionary containing all nodes (key) and their immediate parents (value) # each node has a single parent, but multiple nodes may have the same phenotype id node_dict = {} for parent in GO_tree.getiterator(): for child in parent: node_dict[child] = parent ############################################### # create second dictionary containing node paths node_path = {} for node in node_dict.keys(): #initialise node parent set node_path[node] = [node] #initialise parent node parent_node = node_dict[node]
#heading = rotation about z-axis ez = atan2(2.0 * (x * y + z * w), (sqx - sqy - sqz + sqw)) #bank = rotation about x-axis ex = atan2(2.0 * (y * z + x * w), (-sqx - sqy + sqz + sqw)) #attitude = rotation about y-axis ey = asin(clamp(-2.0 * (x * z - y * w), -1.0, 1.0)) return [degrees(v) for v in (ex, ey, ez)] doc = headxml ents = source.getiterator("entity") for ent in ents: #digs these from the DC called RexPrimExportData mesh = None materials = skeleton = pos = ort = scale = "" scaletoprim = False for comp in ent: attrib = comp.attrib if attrib['type'] == 'EC_DynamicComponent' and attrib[ 'name'] == 'RexPrimExportData': mesh = attrval(comp, 'MeshRef') materials = attrval(comp, 'Materials') skeleton = attrval(comp, 'SkeletonRef') #mesh, materials, skeleton = [changeassetref(s) for s in (mesh, materials, skeleton)]