def write(self, filename, midi):
    """Serialize *midi* to *filename* as pretty-printed XML.

    Writes a <MidiFile> root with the header chunk, then one child per
    track, then re-parses the file with minidom to pretty-print it.
    """
    midifile = Element('MidiFile')
    header = HeaderChunk()
    header.write(midifile, midi.header)
    # Prefix each track element with its index to guarantee element order
    # ("garantit l'ordre des datas" in the original).
    for i, cur in enumerate(midi.tracks):
        node = SubElement(midifile, '_' + str(i))
        temp = TrackChunk()
        temp.write(node, cur)
    tree = ElementTree(midifile)
    tree.write(filename, encoding="utf-8")
    import xml.dom.minidom
    root = xml.dom.minidom.parse(filename)
    # BUG FIX: the original opened the file without ever closing it.
    with open(filename, "w") as f:
        f.write(root.toprettyxml())
def loadLanguageFile(langfile):
    """ Loads one or more languages from disk Returns list of language codes found."""
    global languagenames, languages
    tree = ElementTree(file=langfile)
    found = []
    for langdef in tree.find("wurmlanguages").findall("languagedef"):
        code = langdef.attrib["code"]
        languagenames[code] = langdef.attrib.get("name", code)
        languages.setdefault(code, {})  # make sure the base map is ready
        if code not in found:
            found.append(code)
        for entry in langdef.findall("string"):
            original = _parse(entry.find("original").text.strip())
            translated = _parse(entry.find("translation").text.strip())
            languages[code][original] = translated
    return found
def fulltext_search(self, query, rows=None, start=None):
    """Does an advanced search on fulltext:blah. You get back a pair (x,y) where x is the total # of hits and y is a list of identifiers like ["foo", "bar", etc.]"""
    query = self._prefix_query('fulltext', query)
    result_list = self.raw_search(query, rows=rows, start=start)
    e = ElementTree()
    try:
        e.parse(StringIO(result_list))
    # NOTE(review): `as e` shadows the tree; harmless only because we raise.
    except SyntaxError as e:
        raise SolrError(e)
    total_nbr_text = e.find('info/range_info/total_nbr').text
    # total_nbr_text = e.find('result').get('numFound') # for raw xml
    total_nbr = int(total_nbr_text) if total_nbr_text else 0
    out = []
    for r in e.getiterator('hit'):
        for d in r.find('metadata'):
            for x in list(d.getiterator()):
                if x.tag == "identifier":
                    # Normalize identifiers: strip OCA/ prefix, reduce .txt
                    # paths to the bare id, drop trailing _ZZ markers.
                    xid = six.text_type(x.text).encode('utf-8')
                    if xid.startswith('OCA/'):
                        xid = xid[4:]
                    elif xid.endswith('.txt'):
                        xid = xid.split('/')[-1].split('_')[0]
                    elif xid.endswith('_ZZ'):
                        xid = xid[:-3]
                    out.append(xid)
                    # Only the first identifier per metadata child is kept.
                    break
    return (total_nbr, out)
def edx(self, out_dir):
    """Copy this image into static/ and emit its edX html/xml descriptor pair."""
    static_dir = os.path.join(out_dir, 'static')
    if not os.path.exists(static_dir):
        os.makedirs(static_dir)
    # url_name() (project + group prefix) guarantees uniqueness inside edX.
    # The raw filename is not reused because it may need escaping; only the
    # extension ([a-zA-Z0-9]) is preserved.
    _, ext = os.path.splitext(self.path)
    target_filename = self.url_name() + ext
    target_path = os.path.join(static_dir, target_filename)
    shutil.copyfile(os.path.join(self.parent.path, self.path), target_path)
    html_dir = os.path.join(out_dir, 'html')
    if not os.path.exists(html_dir):
        os.makedirs(html_dir)
    descriptor = Element('html', {'filename': self.url_name(), 'display_name': "Img"})
    ElementTree(descriptor).write(os.path.join(html_dir, "{0}.xml".format(self.url_name())))
    body = '<img src="/static/%(file)s">' % {'file': target_filename}
    body += '''<br> <a href="/static/%(file)s">Download Image %(name)s</a> ''' % {'file': target_filename, 'name': os.path.basename(self.path)}
    with codecs.open(os.path.join(html_dir, "{0}.html".format(self.url_name())), mode='w', encoding='utf-8') as f:
        f.write(body)
def serialize(data, name='object'):
    """Render *data* as an XML string rooted at an element called *name*."""
    root = Element(name)
    _serialize(root, data)
    buf = StringIO()
    ElementTree(root).write(buf, 'UTF-8')
    return buf.getvalue()
def merge(self):
    """Merge heart-rate samples from the HR TCX file into the Tacx TCX file.

    Trackpoints are matched on their timestamp truncated to the second;
    unmatched trackpoints carry the last known heart rate forward.
    Returns the output file name.
    """
    last_hr = 0
    self.tacx_tcx = self._parse_file(self.tacx_tcx_file)
    self.hr_tcx = self._parse_file(self.hr_tcx_file)
    for tp in self.tacx_tcx.dom_trackpoints:
        timestamp = tp.find('role:Time', ns).text
        timestamp_key = timestamp[0:19]  # second-resolution key
        if timestamp_key in self.hr_tcx.TrackPoints:
            heartrate_from_other_file = self.hr_tcx.TrackPoints[timestamp_key].HeartRateBpm
            if heartrate_from_other_file is not None:
                tp.append(self._create_heartrate(heartrate_from_other_file))
                last_hr = heartrate_from_other_file
        else:
            # No matching sample: reuse the last seen heart rate.
            tp.append(self._create_heartrate(last_hr))
    tree = ElementTree(self.tacx_tcx.root)
    with open(self.file_name, 'wb') as out:
        tree.write(out, encoding="utf-8", xml_declaration=True)
    # UGLY! temporary fix for namespace "ns1-issue"
    # BUG FIX: the original called `f.close` without parentheses, so the
    # read handle was still open while the file was reopened for writing.
    with open(self.file_name, "r") as f:
        data = f.read()
    data = data.replace('ns1:TPX', 'TPX')
    data = data.replace('ns1:Speed', 'Speed')
    data = data.replace('ns1:Watts', 'Watts')
    with open(self.file_name, "w") as f:
        f.write(data)
    return self.file_name
def edx(self, out_dir):
    """Copy this PDF into static/ and emit its edX html/xml descriptor pair.

    When the module-level `courseURL` is set, the HTML embeds an inline
    <object> PDF viewer; otherwise only a download link is emitted.
    """
    static_dir = os.path.join(out_dir, 'static')
    if not os.path.exists(static_dir):
        os.makedirs(static_dir)
    # url_name() (project + group prefix) guarantees uniqueness inside edX.
    target_filename = self.url_name() + '.pdf'
    target_path = os.path.join(static_dir, target_filename)
    shutil.copyfile(os.path.join(self.parent.path, self.path), target_path)
    html_dir = os.path.join(out_dir, 'html')
    if not os.path.exists(html_dir):
        os.makedirs(html_dir)
    descriptor = Element('html', {'filename': self.url_name(), 'display_name': "Pdf"})
    ElementTree(descriptor).write(os.path.join(html_dir, "{0}.xml".format(self.url_name())))
    html = ''
    viewer_enabled = courseURL is not None
    if not viewer_enabled:
        logging.warning("courseURL is not specified. Therefore the inline pdf-viewer will be disabled.")
    else:
        # %% is doubled because % is the formatting placeholder.
        html += ''' <object data="%(courseURL)s/asset/%(file)s" type="application/pdf" width="100%%" height="600pt"> ''' % {'courseURL': courseURL, 'file': target_filename}
    html += ''' <a href="/static/%(file)s">Download Pdf %(name)s</a> ''' % {'file': target_filename, 'name': os.path.basename(self.path)}
    if viewer_enabled:
        # BUG FIX: only close the <object> tag when one was opened; the
        # original emitted a stray </object> even with the viewer disabled.
        html += "</object>"
    with codecs.open(os.path.join(html_dir, "{0}.html".format(self.url_name())), mode='w', encoding='utf-8') as f:
        f.write(html)
def test_3x32mb_download_from_xml(self):
    '''Download three randomly-generated 32MB files from a GT server via an XML manifest'''
    size = 1024 * 1024 * 32
    uuids = [self.data_upload_test(size) for _ in range(3)]
    # Build an XML result set with one <Result> per uploaded file.
    result_set = Element('ResultSet')
    for uuid in uuids:
        result = SubElement(result_set, 'Result')
        uri = SubElement(result, 'analysis_data_uri')
        uri.text = '%s/cghub/data/analysis/download/' % TestConfig.HUB_SERVER + str(uuid)
    doc = ElementTree(result_set)
    f = NamedTemporaryFile(delete=False, suffix='.xml')
    doc.write(f)
    f.close()
    self.data_download_test_xml(f.name, uuids)
    os.remove(f.name)
class _DXML(object):
    """Iterates an XML document, dispatching each root-level element through
    a tag->handler map. (Python 2 code: comma except syntax, getchildren.)"""

    def __init__(self, path, tagmap={}, tagdefault=None, **options):
        # NOTE(review): mutable default `tagmap={}` is shared across calls;
        # harmless only if callers never mutate it — confirm.
        self._path, self._options = path, options
        self._tree = ElementTree()
        self._tree.parse(self._path)
        self.verbosity, self.traceback, self.graceful = 1, False, False
        self._tagmap = tagmap
        # Fall back to the identity handler unless a default is supplied.
        self._tagdefault = self._trivial if tagdefault is None else tagdefault

    def __iter__(self):
        self._preiter_hook()
        # Stage 1: namespaces
        for o in self._xml_namespaces(): # IGNORE:E1101
            yield o
        # Stage 2: resources — the root itself plus its direct children.
        r = self._tree.getroot()
        for e in [r] + r.getchildren(): # IGNORE:E1101
            try:
                for o in self._tagmap.get(e.tag, self._tagdefault)(e):
                    yield o
            except Exception, x:
                self._except(Exception, x)
        # Stage 3: inheritance etc.
        self._postiter_hook()
def get_package_name(path):
    """
    Get the name of the ROS package that contains *path*. This is
    determined by finding the nearest parent ``manifest.xml`` file.
    This routine may not traverse package setups that rely on internal
    symlinks within the package itself.

    :param path: filesystem path
    :return: Package name or ``None`` if package cannot be found, ``str``
    """
    # NOTE: the realpath is going to create issues with symlinks, most likely.
    parent = os.path.dirname(os.path.realpath(path))
    # Walk upward until a manifest/package file appears or we hit the root.
    while (not os.path.exists(os.path.join(path, MANIFEST_FILE))
           and not os.path.exists(os.path.join(path, PACKAGE_FILE))
           and parent != path):
        path, parent = parent, os.path.dirname(parent)
    if os.path.exists(os.path.join(path, MANIFEST_FILE)):
        return os.path.basename(os.path.abspath(path))
    if os.path.exists(os.path.join(path, PACKAGE_FILE)):
        return ElementTree(None, os.path.join(path, PACKAGE_FILE)).findtext('name')
    return None
def edx(self, out_dir):
    """Write the edX <video> descriptor XML for this YouTube clip."""
    video_dir = os.path.join(out_dir, 'video')
    if not os.path.exists(video_dir):
        os.makedirs(video_dir)
    attrs = {'youtube': '1.00:' + self.youtube_id, 'youtube_id_1_0': self.youtube_id}
    descriptor = ElementTree(Element('video', attrs))
    descriptor.write(os.path.join(video_dir, "{0}.xml".format(self.url_name())))
def edx(self, out_dir):
    """Copy this file into static/ and emit an edX html page linking to it."""
    static_dir = os.path.join(out_dir, 'static')
    if not os.path.exists(static_dir):
        os.makedirs(static_dir)
    # url_name() (project + group prefix) guarantees uniqueness inside edX;
    # only the original extension is kept.
    _, ext = os.path.splitext(self.path)
    target_filename = self.url_name() + ext
    shutil.copyfile(os.path.join(self.parent.path, self.path),
                    os.path.join(static_dir, target_filename))
    html_dir = os.path.join(out_dir, 'html')
    if not os.path.exists(html_dir):
        os.makedirs(html_dir)
    descriptor = Element('html', {'filename': self.url_name(), 'display_name': "File"})
    ElementTree(descriptor).write(os.path.join(html_dir, "{0}.xml".format(self.url_name())))
    _, filename = os.path.split(self.path)
    body = ''' <a href="/static/%(file)s">Download %(filename)s</a> ''' % {'file': target_filename, 'filename': filename}
    with codecs.open(os.path.join(html_dir, "{0}.html".format(self.url_name())), mode='w', encoding='utf-8') as f:
        f.write(body)
def add_from_file(self, filename):
    '''parses xml file and stores wanted details'''
    Gtk.Builder.add_from_file(self, filename)
    # extract data for the extra interfaces
    tree = ElementTree()
    tree.parse(filename)
    # BUG FIX: tree.iter() replaces getiterator(), which was removed in
    # Python 3.9.
    for ele_widget in tree.iter("object"):
        name = ele_widget.attrib['id']
        widget = self.get_object(name)
        # populate indexes - a dictionary of widgets
        self.widgets[name] = widget
        # populate a reversed dictionary
        self._reverse_widget_dict[widget] = name
        # populate connections list
        connections = [
            (name, ele_signal.attrib['name'], ele_signal.attrib['handler'])
            for ele_signal in ele_widget.findall("signal")]
        if connections:
            self.connections.extend(connections)
    for ele_signal in tree.iter("signal"):
        self.glade_handler_dict.update({ele_signal.attrib["handler"]: None})
def getEvents(feed):
    """ Creates events from an ATOM feed with GeoRSS points. """
    events = []
    tree = ElementTree()
    tree.parse(feed)
    # BUG FIX: iterate the parsed tree directly. The original wrapped the
    # ElementTree in another ElementTree(tree), which only worked by accident.
    entries = tree.iter('{http://www.w3.org/2005/Atom}entry')
    for entry in entries:
        author = entry.find('{http://www.w3.org/2005/Atom}author')
        try:
            name = author.find('{http://www.w3.org/2005/Atom}name').text
            uri = author.find('{http://www.w3.org/2005/Atom}uri').text
        except AttributeError:
            # Entries without a complete author record are skipped.
            continue
        try:
            point = entry.find('{http://www.georss.org/georss}point').text
            latitude = point.split()[0]
            longitude = point.split()[1]
        except AttributeError:
            # Entries without a GeoRSS point are skipped.
            continue
        published = parse_date(
            entry.find('{http://www.w3.org/2005/Atom}published').text
        )
        event = Event(name, uri, published, latitude, longitude)
        events.append(event)
    return events
def parse_categories(fn):
    """Parse the top-level <Category> elements of *fn* into a dict."""
    categories = {}
    root = ElementTree().parse(fn)
    for elem in root.findall('Category'):
        parse_category(elem, None, categories)
    return categories
def run(self):
    """Extract Rogue's Tale login hashes from *.userdata profile files."""
    creds = []
    directory = constant.profile['USERPROFILE'] + u'\\Documents\\Rogue\'s Tale\\users'
    # The actual user details are stored in *.userdata files
    if os.path.exists(directory):
        for f in os.listdir(directory):
            if not re.match('.*\.userdata', f):
                continue
            # Found a user file: pull the hash and username out of it.
            tree = ElementTree(file=directory + '\\' + f)
            root = tree.getroot()
            # Double check to make sure that the file is valid
            if root.tag != 'user':
                self.warning(u'Profile %s does not appear to be valid' % f)
                continue
            creds.append({
                'Login': root.attrib['username'],
                'Hash': root.attrib['password']
            })
    return creds
def list_articles(target_directory, supplementary_materials=False, skip=None):
    """Yield one metadata dict per <article> in the XML files of *target_directory*.

    :param target_directory: directory of XML result files to scan
    :param supplementary_materials: also collect supplementary materials
    :param skip: optional collection of PMC ids to ignore
        (BUG FIX: was a mutable default argument ``skip=[]``)
    """
    if skip is None:
        skip = []
    listing = listdir(target_directory)
    for filename in listing:
        result_tree = ElementTree()
        result_tree.parse(path.join(target_directory, filename))
        for tree in result_tree.iterfind('article'):
            pmcid = _get_pmcid(tree)
            if pmcid in skip:
                continue
            result = {}
            result['name'] = pmcid
            result['doi'] = _get_article_doi(tree)
            result['article-categories'] = _get_article_categories(tree)
            result['article-contrib-authors'] = _get_article_contrib_authors(tree)
            result['article-title'] = _get_article_title(tree)
            result['article-abstract'] = _get_article_abstract(tree)
            result['journal-title'] = _get_journal_title(tree)
            result['article-year'], \
                result['article-month'], \
                result['article-day'] = _get_article_date(tree)
            result['article-url'] = _get_article_url(tree)
            result['article-license-url'], \
                result['article-license-text'], \
                result['article-copyright-statement'] = _get_article_licensing(tree)
            result['article-copyright-holder'] = _get_article_copyright_holder(tree)
            if supplementary_materials:
                result['supplementary-materials'] = _get_supplementary_materials(tree)
            yield result
def main(argv):
    """CLI entry point: load a module-map XML, then annotate each named
    module's source file. (Python 2 code: print statement, file().)"""
    import getopt

    def usage():
        print 'usage: %s [-d] [-p basedir] xml files ...' % argv[0]
        return 100
    try:
        (opts, args) = getopt.getopt(argv[1:], 'dp:')
    except getopt.GetoptError:
        return usage()
    if not args:
        return usage()
    debug = 0
    basedir = '.'
    for (k, v) in opts:
        if k == '-d':
            debug += 1
        elif k == '-p':
            basedir = v
    # Index <module> elements by both logical name and source path so either
    # can be used on the command line.
    modules = {}
    root = ElementTree().parse(args.pop(0))
    for module in root.getchildren():
        if module.tag != 'module':
            continue
        modules[module.get('name')] = module
        modules[module.get('src')] = module
    for name in args:
        try:
            module = modules[name]
        except KeyError:
            print >>sys.stderr, 'not found: %r' % name
            continue
        src = os.path.join(basedir, module.get('src'))
        fp = file(src)
        annot(fp, module)
        fp.close()
    return 0
def parse_operator_xml(f):
    """Parse a chunk-operator XML document into a ChunkOperator."""
    et = ElementTree(file=f)
    root = et.getroot()

    def first_text(node, name):
        # Text of the first child element called *name*;
        # IndexError if absent (same as the original behavior).
        return node.findall(name)[0].text

    operator_id = root.attrib["id"]
    task_id = first_text(root, "task-id")
    scatter_el = root.findall("scatter")[0]
    scatter_task_id = first_text(scatter_el, "scatter-task-id")
    chunk_parent = scatter_el.findall("chunks")[0]
    scatter_chunks = [ScatterChunk(c.attrib["out"], c.attrib["in"])
                      for c in chunk_parent.findall("chunk")]
    scatter = Scatter(task_id, scatter_task_id, scatter_chunks)
    gather_els = root.findall("gather")[0].findall("chunks")[0].findall("chunk")
    gather_chunks = [GatherChunk(first_text(c, "gather-task-id"),
                                 first_text(c, "chunk-key"),
                                 first_text(c, "task-output"))
                     for c in gather_els]
    gather = Gather(gather_chunks)
    return ChunkOperator(operator_id, scatter, gather)
def exportGEXF(self, fileName):
    """Export the graph to *fileName* in GEXF 1.2 format."""
    rootNode = Element("gexf")
    rootNode.attrib['xmlns'] = "http://www.gexf.net/1.2draft"
    rootNode.attrib['version'] = "1.2"
    graphNode = Element("graph")
    graphNode.attrib['mode'] = "static"
    graphNode.attrib['defaultedgetype'] = "directed"
    rootNode.append(graphNode)
    # Attribute declarations must precede the nodes/edges lists.
    graphNode.append(Node.getGexfAttributeNode())
    graphNode.append(Vertex.getGexfAttributeNode())
    NodesList = Element("nodes")
    for n in self.nodes:
        NodesList.append(n.exportToGexfNode())
    graphNode.append(NodesList)
    EdgesList = Element("edges")
    for e in self.vertices:
        EdgesList.append(e.exportToGexfNode())
    graphNode.append(EdgesList)
    doc = ElementTree(rootNode)
    # BUG FIX: the third positional argument of ElementTree.write is
    # xml_declaration (a bool), not the declaration string. The original
    # passed the literal declaration text, which only worked as a truthy
    # flag; pass keywords explicitly instead.
    doc.write(fileName, encoding="utf8", xml_declaration=True)
def pagetext_search(self, locator, query, rows=None, start=None):
    """Does an advanced search on pagetext:blah locator:identifier where identifier is one of the id's from fulltext search. You get back a list of page numbers like [21, 25, 39]."""
    def extract(page_id):
        """TODO: DjVu format is deprecated. Is this function still even used? A page id is something like 'adventsuburbanit00butlrich_0065.djvu', which this function extracts asa a locator and a leaf number ('adventsuburbanit00butlrich', 65). """
        # BUG FIX: raw string — '\d' in a plain literal is an invalid
        # escape sequence (DeprecationWarning, error in future Pythons).
        g = re.search(r'(.*)_(\d{4})\.djvu$', page_id)
        a, b = g.group(1, 2)
        return a, int(b)
    # try using qf= parameter here and see if it gives a speedup. @@
    # pdb.set_trace()
    query = self._prefix_query('pagetext', query)
    page_hits = self.raw_search(query, fq='locator:' + locator, rows=rows, start=start)
    XML = ElementTree()
    try:
        XML.parse(StringIO(page_hits))
    except SyntaxError as e:
        raise SolrError(e)
    # BUG FIX: iter() replaces getiterator(), removed in Python 3.9.
    page_ids = list(el.text for el in XML.iter('identifier'))
    return [extract(x)[1] for x in page_ids]
def main():
    """Merge every src/**/packages.config into projectdata/packages.config,
    de-duplicated by (id, version). (Python 2 code: print statement.)"""
    options, args = parse_args()
    rootElement = Element('packages')
    packages = {}
    print "Searching for packages.config files:"
    for dirpath, subdirs, filenames in walk('src'):
        for filename in filenames:
            if filename == 'packages.config':
                filepath = join(dirpath, filename)
                print " " + filepath
                et = parse(filepath)
                for packageElement in et.findall('package'):
                    pkgId = packageElement.get('id')
                    pkgVersion = packageElement.get('version')
                    # Last occurrence of a given (id, version) pair wins.
                    packages[pkgId, pkgVersion] = packageElement
    print
    print "Writing projectdata/packages.config:"
    # Sorted by (id, version) so the merged file is deterministic.
    rootElement.extend([value for (key, value) in sorted(packages.items())])
    indent(rootElement)
    tree = ElementTree(rootElement)
    dump(tree)
    tree.write('projectdata/packages.config')
class LastParser(object):
    """Fetches and queries a Last.fm user's recent-tracks RSS feed.
    (Python 2 code: urllib2; getiterator was removed in Python 3.9.)"""
    RSS_URL = "http://ws.audioscrobbler.com/2.0/user/{0}/recenttracks.rss"

    def __init__(self, user):
        self.tree = ElementTree()
        self.tree.parse(urllib2.urlopen(self.RSS_URL.format(user)))

    def get_songs(self, count=10):
        """Return up to *count* dicts mapping <item> child tags to text."""
        l = []
        for item in self.tree.getiterator("item"):
            d = {}
            for e in item:
                d[e.tag] = e.text
            l.append(d)
        return l[:count]

    def get_song(self):
        """Return the most recent song dict."""
        return self.get_songs(1)[0]

    def get_titles(self, count=10):
        """Return up to *count* track titles."""
        l = [title.text for title in self.tree.getiterator("title")]
        return l[1:count + 1] # removing rss title

    def get_title(self):
        """Return the most recent track title."""
        return self.get_titles(1)[0]
def parse_operator_xml(f):
    """Build a ChunkOperator from the operator XML document *f*."""
    root = ElementTree(file=f).getroot()

    def text_of(node, tag):
        # Text of the first <tag> child; IndexError if absent (as before).
        return node.findall(tag)[0].text

    scatter_el = root.findall('scatter')[0]
    scatter_chunks = [ScatterChunk(c.attrib['out'], c.attrib['in'])
                      for c in scatter_el.findall('chunks')[0].findall('chunk')]
    scatter = Scatter(text_of(root, 'task-id'),
                      text_of(scatter_el, 'scatter-task-id'),
                      scatter_chunks)
    gather_chunks = [GatherChunk(text_of(c, 'gather-task-id'),
                                 text_of(c, 'chunk-key'),
                                 text_of(c, 'task-output'))
                     for c in root.findall('gather')[0].findall('chunks')[0].findall('chunk')]
    return ChunkOperator(root.attrib['id'], scatter, Gather(gather_chunks))
def load(cls, filename):
    """Parse the project file and print its track nodes.
    (Python 2 code: print statement.)"""
    project_root = ElementTree().parse(filename)
    # NOTE(review): settings_node is read but never used here.
    settings_node = project_root.find(cls.__PROJECT_SETTINGS_NODE_NAME)
    tracks_node = project_root.find(cls.__PROJECT_TRACKS_NODE_NAME)
    track_nodes = tracks_node.findall(cls.__PROJECT_TRACK_NODE_NAME)
    for node in track_nodes:
        print node
def extract_positions(url):
    """Fetch the latin-1 encoded XML at *url* and return one dict per team."""
    tree = ElementTree()
    parser = XMLParser(encoding="iso-8859-1")
    tree.parse(urllib.urlopen(url), parser=parser)
    allpos = []
    for team in tree.getroot().findall("team"):
        attrs = team.find("pos").attrib
        latitude = float(attrs['a'])
        longitude = float(attrs['o'])
        allpos.append({
            "str_latitude": format_deg(latitude, "N", "S"),
            "str_longitude": format_deg(longitude, "E", "W"),
            "speed": float(attrs['s']),
            "course": float(attrs['c']),
            "_id": team.attrib["id"],
            "dtf": float(attrs['d']),
            "last_update": datetime.utcfromtimestamp(int(attrs["w"])),
        })
    return allpos
def run(self):
    """Collect credentials from the first existing RDCMan.settings file."""
    settings = [
        os.path.join(constant.profile['LOCALAPPDATA'], u'Microsoft Corporation\\Remote Desktop Connection Manager\\RDCMan.settings'),
        os.path.join(constant.profile['LOCALAPPDATA'], u'Microsoft\\Remote Desktop Connection Manager\\RDCMan.settings')
    ]
    for setting in settings:
        if not os.path.exists(setting):
            continue
        self.debug(u'Setting file found: {setting}'.format(setting=setting))
        root = ElementTree(file=setting).getroot()
        pwd_found = []
        for element in ('CredentialsProfiles/credentialsProfiles/credentialsProfile',
                        'DefaultGroupSettings/defaultSettings/logonCredentials',
                        'file/server'):
            pwd_found += self.parse_element(root, element)
        # Referenced .rdg files may hold more credentials; best-effort only.
        try:
            for r in root.find('FilesToOpen'):
                if os.path.exists(r.text):
                    self.debug(u'New setting file found: %s' % r.text)
                    pwd_found += self.parse_xml(r.text)
        except Exception:
            pass
        # As in the original, only the first existing settings file is used.
        return pwd_found
def scrape_pkg_uri(self, uri, pkg): """ Scrape package metadata from PyPi when it's running as the Clue Release Manager. Parameters ---------- uri : `str` URI to page containing package's homepage pkg : `str` Package name """ # Example entry: #<div class="distro-block distro-metadata"> # <h4>Metadata</h4> # <dl> # <dt>Distro Index Owner:</dt> # <dd>acmepypi</dd> # <dt>Home Page:</dt> # <dd><a href="http://mysvn/acme.helloworld"> # http://mysvn/acme.helloworld</a></dd> # </dl> #</div> tree = ElementTree() try: tree.parse(urllib2.urlopen(uri)) except urllib2.HTTPError, e: raise UserError("Can't find repository URL for package %s (%s). " "Has it been registered in PyPi?" % (pkg, e))
def dump(self, stream):
    """Write self.xml to the binary *stream*, preceded by an XML declaration
    in self.encoding; pretty-indents first when self.prettyprint is set."""
    if self.prettyprint:
        self.indent(self.xml)
    document = ElementTree(self.xml)
    header = '<?xml version="1.0" encoding="%s"?>' % self.encoding
    stream.write(header.encode(self.encoding))
    # BUG FIX: xml_declaration=False — the declaration was already written
    # above; without the flag ElementTree emits a second declaration for
    # any encoding other than utf-8/us-ascii (e.g. utf-16).
    document.write(stream, encoding=self.encoding, xml_declaration=False)
def ReadPropertiesFromFile(filename):
    """Return a list of controls from XML file filename"""
    parsed = ElementTree().parse(filename)
    # Return the list that has been stored under 'CONTROL'
    props = _read_xml_structure(parsed)['CONTROL']
    # A single control deserializes as a bare dict; normalize to a list.
    if not isinstance(props, list):
        props = [props]
    # it is an old XML so let's fix it up a little
    if not ("_version_" in parsed.attrib.keys()):
        # find each of the control elements
        for ctrl_prop in props:
            # Migrate legacy upper-case keys to the modern snake_case schema.
            ctrl_prop['fonts'] = [_xml_to_struct(ctrl_prop['FONT'], "LOGFONTW"), ]
            ctrl_prop['rectangle'] = _xml_to_struct(ctrl_prop["RECTANGLE"], "RECT")
            ctrl_prop['client_rects'] = [_xml_to_struct(ctrl_prop["CLIENTRECT"], "RECT"), ]
            ctrl_prop['texts'] = _old_xml_to_titles(ctrl_prop["TITLES"])
            ctrl_prop['class_name'] = ctrl_prop['CLASS']
            ctrl_prop['context_help_id'] = ctrl_prop['HELPID']
            ctrl_prop['control_id'] = ctrl_prop['CTRLID']
            ctrl_prop['exstyle'] = ctrl_prop['EXSTYLE']
            ctrl_prop['friendly_class_name'] = ctrl_prop['FRIENDLYCLASS']
            ctrl_prop['is_unicode'] = ctrl_prop['ISUNICODE']
            ctrl_prop['is_visible'] = ctrl_prop['ISVISIBLE']
            ctrl_prop['style'] = ctrl_prop['STYLE']
            ctrl_prop['user_data'] = ctrl_prop['USERDATA']
            # Drop the legacy keys once migrated.
            for prop_name in [
                'CLASS', 'CLIENTRECT', 'CTRLID', 'EXSTYLE', 'FONT',
                'FRIENDLYCLASS', 'HELPID', 'ISUNICODE', 'ISVISIBLE',
                'RECTANGLE', 'STYLE', 'TITLES', 'USERDATA', ]:
                del (ctrl_prop[prop_name])
    return props
def __init__(self):
    """Load face definitions from face.xml, grouped by category, plus the
    grid layout (columns/rows) from WNDCONFIG."""
    self._faces = OrderedDict()
    tree = ElementTree(file=path.face_path + "face.xml")
    category = ""
    for face in tree.iterfind("./FACEINFO/"):
        assert face.tag == "FACE"
        face = FaceItem(face)
        if category != face.category:
            category = face.category
            self._faces[category] = OrderedDict()
        # BUG FIX: this assignment was in an `else` branch, which silently
        # dropped the first face of every category.
        self._faces[category][face.name] = face
    size = tree.find("./WNDCONFIG/Align")
    self._col, self._row = int(size.get("Col")), int(size.get("Row"))
    self._all_faces_cache = []
def parseStreamListAthanmenu_2(self):
    """Parse every <Contry> entry of self.xml into a list of field dicts."""
    tree = ElementTree()
    tree.parse(self.xml)
    fields = ('Contnt', 'Contr', 'name', 'url', 'Id')
    return [{field: str(athan.findtext(field)) for field in fields}
            for athan in tree.findall('Contry')]
def _get_list(base_url, method, tag, acceptable_params, debug, **args):
    """Makes the request and parses the response into a list for its wrapper method"""
    # check if acceptable criteria was entered (prevents some errors)
    if not _correct_params(acceptable_params, **args):
        raise ValueError("At least one parameter with incorrect name given. Check http://devel.yahoo.com/igor/guide/rest_api.html for acceptable parameters")
    response = _GET(base_url, method, debug, **args)
    # parse the XML response, keeping the name of every matching element
    tree = ElementTree()
    tree.parse(response)
    return [elem.attrib["name"] for elem in tree.iter() if elem.tag == tag]
def parse_xml(self, database_path):
    """Extract credential fields from the connection database at *database_path*.

    Looks for <connection> elements anywhere in the document and collects
    the text of their grandchildren whose tags are in the known field list.
    """
    xml_file = os.path.expanduser(database_path)
    tree = ElementTree(file=xml_file)
    root = tree.getroot()
    pwd_found = []
    elements = ['name', 'protocol', 'host', 'port', 'description', 'login', 'password']
    for connection in root.iter('connection'):
        # BUG FIX: list(connection) replaces getchildren(), which was
        # removed in Python 3.9.
        children = list(connection)
        values = {}
        for child in children:
            for c in child:
                if str(c.tag) in elements:
                    values[str(c.tag).capitalize()] = str(c.text)
        if values:
            pwd_found.append(values)
    return pwd_found
def run(self):
    """Recover DbVisualizer connections: alias, login, decrypted password
    and the Server/Port/SID URL variables from dbvis.xml."""
    path = os.path.join(constant.profile['HOMEPATH'], u'.dbvis', u'config70', u'dbvis.xml')
    if os.path.exists(path):
        tree = ElementTree(file=path)
        pwd_found = []
        elements = {
            'Alias': 'Name',
            'Userid': 'Login',
            'Password': '******',
            'UrlVariables//Driver': 'Driver'
        }
        for e in tree.findall('Databases/Database'):
            values = {}
            for elem in elements:
                try:
                    if elem != "Password":
                        values[elements[elem]] = e.find(elem).text
                    else:
                        # Stored passwords are obfuscated; decode them.
                        values[elements[elem]] = self.decrypt(e.find(elem).text)
                except Exception:
                    pass
            try:
                # BUG FIX: list(...) replaces getchildren(), removed in
                # Python 3.9.
                for ee in list(e.find('UrlVariables')):
                    for ele in list(ee):
                        if 'Server' == ele.attrib['UrlVariableName']:
                            values['Host'] = str(ele.text)
                        if 'Port' == ele.attrib['UrlVariableName']:
                            values['Port'] = str(ele.text)
                        if 'SID' == ele.attrib['UrlVariableName']:
                            values['SID'] = str(ele.text)
            except Exception:
                pass
            if values:
                pwd_found.append(values)
        return pwd_found
def saveAP(self, apFileName, autoGdlFile):
    """Write attachment-point data for all glyphs to *apFileName*."""
    root = Element('font')
    root.set('upem', str(self.emunits()))
    root.set('producer', 'graide 1.0')
    root.text = "\n\n"
    # Each truthy glyph appends its own AP subtree to the root.
    for glyph in self.glyphs:
        if glyph:
            glyph.createAP(root, self, autoGdlFile)
    ElementTree(root).write(apFileName, encoding="utf-8", xml_declaration=True)
class BaseXML(object):
    """Thin wrapper around an XML file: parses it on construction and can
    pretty-print and write it back."""

    def __init__(self, path):
        # Ensure the stored path always carries a .xml extension.
        self.path = os.path.splitext(path)[1] == '.xml' and path or path + '.xml'
        self.xml_tree = None
        self.xml_root_element = None
        self.parsed = self.parse_file()

    def parse_file(self):
        """Parse self.path; back up an unparsable file and return False."""
        if not os.path.exists(self.path):
            return False
        try:
            self.xml_tree = ElementTree()
            self.xml_tree.parse(self.path)
            self.xml_root_element = self.xml_tree.getroot()
        except Exception as e:
            log.error("%s invalid xml file - %s, creating backup..." % (str(self), str(e)))
            # BUG FIX: back up the corrupt file itself; the original passed
            # the containing *directory* as the copy source, which raises.
            shutil.copy2(self.path, self.path + ".bak")
            return False
        return True

    def indent(self, elem, level=0):
        """Recursively pretty-indent *elem* in place."""
        i = "\n" + level * " "
        if len(elem):
            if not elem.text or not elem.text.strip():
                elem.text = i + " "
            for e in elem:
                self.indent(e, level + 1)
                if not e.tail or not e.tail.strip():
                    e.tail = i + " "
            if not e.tail or not e.tail.strip():
                e.tail = i
        else:
            if level and (not elem.tail or not elem.tail.strip()):
                elem.tail = i

    def write_file(self):
        """Pretty-indent the tree and write it back to self.path."""
        if not os.path.exists(os.path.dirname(self.path)):
            os.makedirs(os.path.dirname(self.path))
        self.indent(self.xml_root_element)
        self.xml_tree = ElementTree(self.xml_root_element).write(
            self.path, encoding='utf-8')
def parse_input_xml(path):
    """Parse a SWIG pre-processed XML file """
    tree = ElementTree()
    try:
        tree.parse(path)
    except SyntaxError as exc:
        # BUG FIX: the path and the parser message were swapped in the
        # format arguments, producing "from '<error>': <path>".
        exc.args = (
            "could not parse XML input from '{}': {}".format(path, str(exc)),
        )
        raise
    # find root of preprocessing interface parse tree
    for incl in tree.findall('include'):
        if get_swig_attr(incl.find('module'), 'name') == 'swiglal_preproc':
            return incl
    raise RuntimeError(
        "could not find root of preprocessing interface parse tree",
    )
def run(self):
    """Collect libpurple (Pidgin) account credentials from accounts.xml."""
    pwd_found = self.get_password_from_dbus()
    for path in homes.get(file=os.path.join('.purple', 'accounts.xml')):
        root = ElementTree(file=path).getroot()
        for account in root.findall('account'):
            name = account.find('name')
            password = account.find('password')
            # Only accounts that store both a name and a password count.
            if name is not None and password is not None:
                pwd_found.append({
                    'Login': name.text,
                    'Password': password.text
                })
    return pwd_found
def parse_xml(self, path):
    """Collect known credential fields from every <Bean> in the file at *path*."""
    pwd_found = []
    if os.path.exists(path):
        tree = ElementTree(file=path)
        elements = {
            'name': 'Name',
            'url': 'URL',
            'userName': '******',
            'password': '******'
        }
        for bean in tree.iter('Bean'):
            values = {elements[child.tag]: child.text
                      for child in bean if child.tag in elements}
            if values:
                pwd_found.append(values)
    return pwd_found
def run(self):
    """Collect FileZilla server credentials from its XML config files."""
    path = os.path.join(constant.profile['APPDATA'], u'FileZilla')
    if os.path.exists(path):
        pwd_found = []
        for file in [u'sitemanager.xml', u'recentservers.xml', u'filezilla.xml']:
            xml_file = os.path.join(path, file)
            if not os.path.exists(xml_file):
                continue
            tree = ElementTree(file=xml_file)
            if tree.findall('Servers/Server'):
                servers = tree.findall('Servers/Server')
            else:
                servers = tree.findall('RecentServers/Server')
            for server in servers:
                host = server.find('Host')
                port = server.find('Port')
                login = server.find('User')
                password = server.find('Pass')
                # if all((host, port, login)) does not work
                if host is not None and port is not None and login is not None:
                    values = {
                        'Host': host.text,
                        'Port': port.text,
                        'Login': login.text,
                    }
                    # BUG FIX: `if password:` truth-tested the Element,
                    # which is falsy when it has no children — plain-text
                    # passwords (text only, no children) were skipped.
                    if password is not None:
                        if 'encoding' in password.attrib and password.attrib['encoding'] == 'base64':
                            values['Password'] = base64.b64decode(password.text)
                        else:
                            values['Password'] = password.text
                    if values:
                        pwd_found.append(values)
        return pwd_found
def convert(self, markdown_text, levelOffset=0, contentType=""):
    """Render *markdown_text* to HTML, then dispatch each top-level element
    to a handler to build self.content. (Python 2 code: print statement.)"""
    self.levelOffset = levelOffset
    self.content = Content()
    self.contentType = contentType
    html = markdown(markdown_text, ["fenced_code"])
    # Wrap in a single root so the fragment parses as one XML document.
    html = "<div>\n{0}\n</div>".format(html)
    root = ElementTree().parse(StringIO(html))
    for child in root.getchildren():
        # Header tags (h1..h6) are routed by their numeric level.
        if child.tag[0] == "h":
            try:
                level = int(child.tag[1])
            except:
                # Not a header (e.g. <hr>); skip it.
                continue
            self.headerHandler(child, level)
        elif child.tag in self.handlers:
            self.handlers[child.tag](self, child)
        else:
            print "Unknown tag {0}".format(child.tag)
    return self.content
def _getBranchPermission(base_url, method, acceptable_params, debug, **args):
    """Makes the request and parses the response for its wrapper method"""
    # check if acceptable criteria was entered (prevents some errors)
    if not _correct_params(acceptable_params, **args):
        raise ValueError("At least one parameter with incorrect name given. Check http://devel.yahoo.com/igor/guide/rest_api.html for acceptable parameters")
    response = _GET(base_url, method, debug, **args)
    # Fold <acl> attributes and per-<group> permissions into one dict.
    tree = ElementTree()
    tree.parse(response)
    branch_dic = {}
    for node in tree.iter():
        if node.tag == "acl":
            branch_dic.update(node.attrib)
        elif node.tag == "group":
            branch_dic[node.text] = node.attrib["perm"]
    return Obj(branch_dic)
def __parse_pipeline_template_xml(binding_func, file_name, registered_pipelines):
    """:rtype: BuilderRecord"""
    root = ElementTree(file=file_name).getroot()
    bindings, task_opts = binding_func(root, registered_pipelines)
    # Template-level defaults first, then XML values override them.
    task_options = dict(parse_task_options(root))
    task_options.update(task_opts)
    workflow_options = validate_workflow_options(dict(parse_workflow_options(root)))
    return BuilderRecord(bindings, task_options, workflow_options)
def write_records(self, records):
    """Overriden: write each record as one <row> XML element per line."""
    for record in records:
        row = Element(u'row')
        # Optionally carry the record id as an attribute.
        if self.id_col:
            row.attrib[u'_id'] = text_type(record[u'_id'])
        for column in self.columns:
            self._insert_node(row, get_xml_element(column), record[column])
        ElementTree(row).write(self.response, encoding=u'utf-8')
        self.response.write(b'\n')
def parse_pipeline_template_xml(file_name, registered_pipelines):
    """
    :param file_name:
    :rtype: BuilderRecord
    """
    root = ElementTree(file=file_name).getroot()

    # Two supported forms: a template-id reference, or explicit
    # entry points plus bindings.
    if _has_template_node(root):
        return _parse_pipeline_template_xml_with_template_id(file_name, registered_pipelines)
    if _has_entry_points_and_bindings(root):
        return _parse_pipeline_template(file_name, registered_pipelines)

    raise ValueError("Unable to find Workflow template id, or explicit bindings and entry points in {f}".format(f=file_name))
def serialize(datadict, roottag='root', defaulttag='item', encoding='utf-8',
              xml_declaration=True, pretty=False, sort=False):
    """Serialize *datadict* to an XML string.

    :param datadict: mapping to convert; a single-key dict's key becomes
        the root tag (overriding *roottag*)
    :param roottag: tag used for the document root
    :param defaulttag: tag used for anonymous list items
    :param encoding: encoding passed to ElementTree.write
    :param xml_declaration: whether to emit the <?xml ...?> prolog
    :param pretty: indent the tree before writing
    :param sort: sort keys while converting
    :returns: the serialized XML document as a string
    """
    # BUGFIX: len(datadict.keys()) built an intermediate list, and
    # datadict.items()[0] fails on Python 3 (dict views are not
    # subscriptable); use len() on the dict and next(iter(...)).
    if len(datadict) == 1:
        roottag, datadict = next(iter(datadict.items()))
    root = Element(roottag)
    _convert_dict_to_xml_recurse(root, datadict, {}, defaulttag, sort)
    if pretty:
        _indent(root)
    tree = ElementTree(root)
    fileobj = StringIO()
    # NOTE(review): on Python 3 an explicit encoding makes write() emit
    # bytes, which StringIO rejects -- confirm Python 2 usage or switch
    # to BytesIO there.
    tree.write(fileobj, encoding=encoding, xml_declaration=xml_declaration)
    return fileobj.getvalue()
def run(self):
    """Recover Oracle SQL Developer connection credentials.

    Locates connections.xml (either directly under the SQL Developer
    profile directory or inside a system*/o.jdeveloper.db.connection*
    subdirectory), then maps each stored connection attribute to a
    user-facing label, decrypting passwords with the profile passphrase.
    Returns a list of dicts, or None when nothing usable is found.
    """
    path = os.path.join(constant.profile['APPDATA'], u'SQL Developer')
    if os.path.exists(path):
        # Without the passphrase the stored passwords cannot be decrypted,
        # so bail out early if it is missing.
        self._passphrase = self.get_passphrase(path)
        if self._passphrase:
            self.debug(u'Passphrase found: {passphrase}'.format(passphrase=self._passphrase))
            xml_name = u'connections.xml'
            xml_file = None
            if os.path.exists(os.path.join(path, xml_name)):
                xml_file = os.path.join(path, xml_name)
            else:
                # Some layouts keep connections.xml under
                # system*/o.jdeveloper.db.connection*/ instead.
                for p in os.listdir(path):
                    if p.startswith('system'):
                        new_directory = os.path.join(path, p)
                        for pp in os.listdir(new_directory):
                            if pp.startswith(u'o.jdeveloper.db.connection'):
                                if os.path.exists(os.path.join(new_directory, pp, xml_name)):
                                    xml_file = os.path.join(new_directory, pp, xml_name)
                                break
            if xml_file:
                # Map the XML addrType keys to output labels; the masked
                # '******' labels are intentional runtime strings.
                renamed_value = {'sid': 'SID', 'port': 'Port', 'hostname': 'Host', 'user': '******', 'password': '******', 'ConnName': 'Name', 'customUrl': 'URL', 'SavePassword': '******', 'driver': 'Driver'}
                tree = ElementTree(file=xml_file)
                pwd_found = []
                for e in tree.findall('Reference'):
                    values = {}
                    for ee in e.findall('RefAddresses/StringRefAddr'):
                        # Keep only known keys with non-empty <Contents>;
                        # password values go through self.decrypt().
                        if ee.attrib['addrType'] in renamed_value and ee.find('Contents').text is not None:
                            name = renamed_value[ee.attrib['addrType']]
                            value = ee.find('Contents').text if name != 'Password' else self.decrypt(
                                ee.find('Contents').text)
                            values[name] = value
                    pwd_found.append(values)
                return pwd_found
def parse(self):
    """Parse the original file's XML analysis data into measurement columns.

    Reads the XML via the original-file provider, then for each <area>:
    collects the parameter headers, builds empty columns, resolves each
    well to its OMERO well object and appends the per-well result values.

    :returns: MeasurementParsingResult, or None when an area contains no
        analysis data.
    """
    log.info("Parsing: %s" % self.original_file.name.val)
    provider = self.original_file_provider
    data = provider.get_original_file_data(self.original_file)
    try:
        et = ElementTree(file=data)
    finally:
        # Always release the raw file handle, even if parsing fails.
        data.close()
    root = et.getroot()
    areas = root.findall(self.AREA_XPATH)
    log.debug("Area count: %d" % len(areas))
    for i, area in enumerate(areas):
        result_parameters = area.findall(self.PARAMETER_XPATH)
        log.debug("Area %d result children: %d" % (i, len(result_parameters)))
        if len(result_parameters) == 0:
            log.warn("%s contains no analysis data." % self.get_name())
            return
        # Column headers come from the parameter elements' text.
        headers = [result_parameter.text for result_parameter in result_parameters]
        columns = self.get_empty_columns(headers)
        for well in area.findall(self.WELL_XPATH):
            # Rows and columns are 1-indexed, OMERO wells are 0-indexed
            row = int(well.get('row')) - 1
            column = int(well.get('col')) - 1
            try:
                v = columns['Well'].values
                wellobj, images = self.get_well_images(row, column)
                if not wellobj:
                    continue
                v.append(wellobj.id.val)
            # BUGFIX: was a bare "except:", which also swallowed
            # KeyboardInterrupt/SystemExit.
            except Exception:
                log.exception("ERROR: Failed to get well images")
                continue
            for result in well.findall(self.RESULT_XPATH):
                name = result.get('name')
                columns[name].values.append(float(result.text))
    # NOTE(review): "columns" is only bound inside the loop; with zero
    # areas this raises NameError -- confirm areas is always non-empty.
    return MeasurementParsingResult([columns.values()])
def update_editor(self):
    """ Updates the editor when the object trait changes externally
        to the editor.
    """
    data = self.value
    if isinstance(data, SVGDocument):
        # Serialize the SVG DOM to its XML text form before loading.
        buf = StringIO()
        ElementTree(data.tree).write(buf)
        data = buf.getvalue()
    self.control.load(QtCore.QByteArray(data))
def writerow(self, row):
    """Write one row as a <row> XML element followed by a newline byte."""
    record = Element(u'row')
    # When an id column is configured, the first cell becomes the _id
    # attribute and the remaining cells map onto self.columns.
    if self.id_col:
        record.attrib[u'_id'] = unicode(row[0])
        row = row[1:]
    for name, value in zip(self.columns, row):
        SubElement(record, name).text = u'NULL' if value is None else unicode(value)
    ElementTree(record).write(self.output, encoding=u'utf-8')
    self.output.write(b'\n')
def run(self):
    """Collect URL/password pairs from the application's XML settings,
    decrypting each stored value with the Windows DPAPI."""
    xml_file = self.get_application_path()
    if xml_file and os.path.exists(xml_file):
        tree = ElementTree(file=xml_file)
        pwd_found = []
        for elem in tree.iter():
            try:
                url = elem.attrib['name']
                # startswith accepts a tuple of prefixes: same truth
                # value as the original chained "or" conditions.
                if url.startswith(('ftp', 'ftps', 'sftp', 'http', 'https')):
                    encrypted_password = base64.b64decode(elem.attrib['value'])
                    password = Win32CryptUnprotectData(encrypted_password,
                                                       is_current_user=constant.is_current_user,
                                                       user_dpapi=constant.user_dpapi)
                    pwd_found.append({
                        'URL': url,
                        'Password': password,
                    })
            except Exception as e:
                self.debug(str(e))
        return pwd_found
def run(self):
    """Read SQuirreL SQL aliases (name/URL/user/password) from
    SQLAliases23.xml in the user's profile."""
    path = os.path.join(constant.profile['USERPROFILE'], u'.squirrel-sql', u'SQLAliases23.xml')
    if not os.path.exists(path):
        return
    tree = ElementTree(file=path)
    pwd_found = []
    # Map alias XML tags onto output labels (masked labels intentional).
    tag_to_label = {
        'name': 'Name',
        'url': 'URL',
        'userName': '******',
        'password': '******'
    }
    for bean in tree.iter('Bean'):
        values = {tag_to_label[child.tag]: child.text
                  for child in bean if child.tag in tag_to_label}
        if values:
            pwd_found.append(values)
    return pwd_found
def run(self, profile):
    """Collect Cyberduck credentials from its XML settings file,
    decrypting each stored value with the Windows DPAPI."""
    xml_file = self.get_application_path(os.path.join(profile['APPDATA'], 'Cyberduck'))
    if xml_file and os.path.isfile(xml_file):
        tree = ElementTree(file=xml_file)
        pwd_found = []
        for node in tree.iter():
            try:
                url = node.attrib['name']
                # Tuple form of startswith: same truth value as the
                # original chained "or" conditions.
                if url.startswith(('ftp', 'ftps', 'sftp', 'http', 'https')):
                    decrypted = CryptUnprotectData(base64.b64decode(node.attrib['value']), profile)
                    pwd_found.append({
                        'URL': url,
                        'Password': decrypted.decode("utf-8"),
                    })
            except Exception as e:
                log.debug(str(e))
        return pwd_found
def GetElementsFromXML(self, filename):
    """Extracts a dictionary of elements from the gcc_xml file.

    :param filename: path to a GCC_XML-generated XML file
    :returns: dict mapping each element's ``id`` attribute to a tuple
        ``(element, None)``; elements without an id are skipped
    :raises InvalidXMLError: if the file is not XML or its root tag is
        not GCC_XML
    """
    tree = ElementTree()
    try:
        tree.parse(filename)
    except ExpatError:
        # BUGFIX: was Python-2-only "raise InvalidXMLError, '...'" syntax.
        raise InvalidXMLError('Not a XML file: %s' % filename)
    root = tree.getroot()
    if root.tag != 'GCC_XML':
        raise InvalidXMLError('Not a valid GCC_XML file')
    # Build a dictionary of id -> (element, None).  Iterating the root
    # directly replaces getchildren(), which was removed in Python 3.9.
    elements = {}
    for element in root:
        element_id = element.get('id')  # renamed: "id" shadowed the builtin
        if element_id:
            elements[element_id] = element, None
    return elements
def decorate_item(cls, leaf):
    # FIXME: Very simplified .savedSearch parsing; so far only the query
    # is supported, without additional filtering.  The simplest form of
    # a .savedSearch file, as saved by nautilus, is:
    # <query version="1.0">
    #     <text>QUERY GOES HERE</text>
    # </query>
    if not leaf.object.endswith(".savedSearch"):
        return None
    try:
        root = ElementTree(file=leaf.object).getroot()
        query = root.find("text").text
        if not query:
            return None
        loc_node = root.find("location")
        loc = None if loc_node is None else loc_node.text
        return cls(query, location=location_uri(loc))
    except Exception:
        pretty.print_exc(__name__)
        return None
def get_tree(self):
    """Parse the editor's text into an ElementTree; None on parse error."""
    source = self.get_text().strip()
    try:
        root = fromstring(source.encode("UTF-8"))
    except Exception:
        # FIXME: show message in the UI instead of only printing it.
        import traceback
        traceback.print_exc()
        return
    # Pretty-indent in place, then hand back the wrapping tree.
    indent_tree(root)
    return ElementTree(root)
def create_new_epg(args, original_epg_filename, m3u_entries):
    """Build a filtered XMLTV tree containing only the channels and
    programmes referenced by *m3u_entries*.

    Copies matching <channel> elements (children and attributes included)
    and all in-range <programme> elements from the original EPG into a
    fresh <tv> root, records channels with no usable tvg_id, and returns
    the new ElementTree.
    """
    output_str("creating new xml epg for {} m3u items".format(
        len(m3u_entries)))
    original_tree = parse(original_epg_filename)
    original_root = original_tree.getroot()
    new_root = Element("tv")
    new_root.set("source-info-name", "py-m3u-epg-editor")
    new_root.set("generator-info-name", "py-m3u-epg-editor")
    new_root.set("generator-info-url", "py-m3u-epg-editor")
    # create a channel element for every channel present in the m3u
    for channel in original_root.iter('channel'):
        channel_id = channel.get("id")
        if any(x.tvg_id == channel_id for x in m3u_entries):
            output_str("creating channel element for {}".format(channel_id))
            new_channel = SubElement(new_root, "channel")
            new_channel.set("id", channel_id)
            # deep-copy the channel's children with text and attributes
            for elem in channel:
                new_elem = SubElement(new_channel, elem.tag)
                new_elem.text = elem.text
                for attr_key in elem.keys():
                    attr_val = elem.get(attr_key)
                    new_elem.set(attr_key, attr_val)
    # now copy all programme elements from the original epg for every channel present in the m3u
    no_epg_channels = []
    for entry in m3u_entries:
        # tvg_id may literally be the string "None" when absent upstream
        if entry.tvg_id is not None and entry.tvg_id != "" and entry.tvg_id != "None":
            output_str("creating programme elements for {}".format(
                entry.tvg_name))
            channel_xpath = 'programme[@channel="' + entry.tvg_id + '"]'
            for elem in original_tree.iterfind(channel_xpath):
                # only keep programmes within the configured time range
                if is_in_range(args, elem):
                    programme = SubElement(new_root, elem.tag)
                    for attr_key in elem.keys():
                        attr_val = elem.get(attr_key)
                        programme.set(attr_key, attr_val)
                    # deep-copy programme children (title, desc, ...)
                    for sub_elem in elem:
                        new_elem = SubElement(programme, sub_elem.tag)
                        new_elem.text = sub_elem.text
                        for attr_key in sub_elem.keys():
                            attr_val = sub_elem.get(attr_key)
                            new_elem.set(attr_key, attr_val)
        else:
            no_epg_channels.append("'{}'".format(entry.tvg_name.lower()))
    indent(new_root)
    tree = ElementTree(new_root)
    save_no_epg_channels(args, no_epg_channels)
    return tree
def _getHostInfo(base_url, method, acceptable_params, debug, **args):
    """Makes the request and parses the response for its wrapper method"""
    # Validate parameter names before issuing the request.
    if not _correct_params(acceptable_params, **args):
        raise ValueError("At least one parameter with incorrect name given. Check http://devel.yahoo.com/igor/guide/rest_api.html for acceptable parameters")

    response = _GET(base_url, method, debug, **args)

    # Flatten the XML response into a plain attribute dictionary.
    parsed = ElementTree()
    parsed.parse(response)

    info = {}
    for node in parsed.iter():
        if node.tag == "host":
            info.update(node.attrib)
        elif node.tag == "property" and node.attrib["name"] == "igor_tag":
            info["igor_tag"] = node.text
    return Obj(info)
def updatebq(self):
    """Rebuild the GreekStreamTV bouquet file from the GSXML channel list.

    Parses the <iptv> entries, converts each into an enigma2 #SERVICE
    line (routing livestreamer URIs through the local proxy), sorts the
    channels by name and rewrites the bouquet file, then runs the
    post-update shell cleanup.

    :returns: the sorted list of (name, service) tuples
    """
    # cElementTree was removed in Python 3.9; the plain module works on
    # both Python 2 and 3.
    from xml.etree.ElementTree import ElementTree
    tree = ElementTree()
    tree.parse(GSXML)
    tvlist = []
    for iptv in tree.findall("iptv"):
        name = iptv.findtext("name").title()
        (protocol, serviceType, bufferSize, epgId) = iptv.findtext("type").split(":")
        uri = iptv.findtext("uri")
        # NOTE(review): substring test -- any protocol that is a substring
        # of "livestreamer" (e.g. "live") also matches; confirm intent.
        if protocol in "livestreamer":
            # Route through the local livestreamer proxy; ':' must be
            # escaped because the URI is embedded in the service ref.
            uri = "http://localhost:88/" + uri
            uri = uri.replace(":", "%3a")
        service = "#SERVICE {s}:0:1:{e}:{e}:0:0:0:0:0:{u}:{n}\n".format(s=serviceType, e=epgId, u=uri, n=name)
        tvlist.append((name, service))
    tvlist = sorted(tvlist, key=lambda channel: channel[0])  # sort by name
    with open(GSBQ, "w") as f:
        f.write("#NAME GreekStreamTV\n")
        for (name, service) in tvlist:
            f.write(service)
    com = "cat /usr/lib/enigma2/python/Plugins/Satdreamgr/UpdateBouquet/stream.xml ; rm /usr/lib/enigma2/python/Plugins/Satdreamgr/UpdateBouquet/stream.xml"
    # BUGFIX: consume and close the pipe so the command actually runs to
    # completion before returning (os.popen output is read lazily).
    pipe = os.popen(com)
    pipe.read()
    pipe.close()
    # BUGFIX: previously returned the builtin ``list`` type by mistake;
    # return the channel list that was just written.
    return tvlist