def parseAndAddDynamicLeadTree(FixedURL, mainS=False):
    """Fetch the XML feed at FixedURL and add one directory entry per
    <dynamicLeadSlide> film found under the first <slides> element.

    FixedURL -- URL of the XML feed to download
    mainS    -- when True, prefix each title with a bullet character
    """
    DLConn = urllib2.urlopen(urllib2.Request(FixedURL))
    MTreeS = DLConn.read()
    DLConn.close()
    MTree = ElementTree.fromstring(MTreeS)
    Slides = MTree.findall('slides')
    DLSlides = Slides[0].findall('dynamicLeadSlide')
    for DLSlide in list(DLSlides):
        Film = DLSlide.findall('film')[0]
        FilmID = Film.find('id').text
        Title = Film.find('title').text
        if mainS == True:
            Title = u'\u2022 ' + Title
        Plot = Film.find('logline').text
        Year = Film.find('year').text
        Minutes = Film.find('duration_minutes').text
        Seconds = Film.find('duration_seconds').text
        Duration = GetFormattedTime(Minutes) + ':' + Seconds
        Images = Film.find('images')
        if Images is not None:
            Image = Images.find('image')
            strImage = ElementTree.tostring(Image)
            # Scrape the poster URL out of the serialized tag text.
            # NOTE(review): assumes the feed always emits exactly these
            # attribute values in exactly this order -- fragile; confirm
            # against the live feed.
            ImageSrc = re.compile('<image height="317" src="(.+?)" type="android_poster" width="214" />').findall(strImage)[0]
        else:
            ImageSrc = ''
        MPAA = Film.find('rating').text
        addDir(Title, FilmID, SF_MODE_PLAY, ImageSrc, False, True, False, Plot, Year, Duration, MPAA)
def dotransform(request, response):
    """Create a Nexpose site for the Maltego target, launch a scan, wait
    for it to finish and append a NexposeSite entity to the response.

    request  -- Maltego request; request.value is the target IP/host
    response -- Maltego response object entities are appended to
    Returns the (possibly extended) response object.
    """
    # Nexpose API session login
    session = nexlogin()
    try:
        # Nexpose site creation, named after the current timestamp
        sitename = datetime.today().strftime("%Y%m%d-%H%M%S") + '-MaltegoSite'
        newsite = host_site(sitename, request.value)
        nexsite = sitesave(session, newsite)
        resxml = ET.fromstring(nexsite)
        siteid = resxml.attrib.get('site-id')
        progress(10)
        if resxml.attrib.get('success') == '1':
            # Nexpose Scan Site
            launchscan = sitescan(session, siteid)
            launchres = ET.fromstring(launchscan)
            progress(25)
            if launchres.attrib.get('success') == '1':
                for child in launchres:
                    scanid = child.attrib.get('scan-id')
                    status = scanstatus(session, scanid)
                    statusxml = ET.fromstring(status)
                    progress(50)
                    # Poll every 5 seconds until the scan leaves 'running'
                    while statusxml.attrib.get('status') == 'running':
                        sleep(5)
                        status = scanstatus(session, scanid)
                        statusxml = ET.fromstring(status)
                    progress(100)
                    response += NexposeSite(
                        sitename,
                        siteid=siteid,
                        scanid=scanid,
                        targetip=request.value)
        return response
    finally:
        # BUG FIX: nexlogout() was previously placed after 'return response'
        # and therefore never executed, leaking the API session.  The
        # try/finally guarantees logout on every exit path.
        nexlogout(session)
def parseAndAddFilmsTree(FixedURL):
    """Fetch the XML film listing at FixedURL, add a directory entry per
    <film>, and append a "next page" entry when more pages are available.
    Relies on the module-level plugin globals 'mode', 'url', 'next_thumb',
    '__settings__' and 'SF_MODE_LIST'.
    """
    DLConn = urllib2.urlopen(urllib2.Request(FixedURL))
    MTreeS = DLConn.read()
    DLConn.close()
    MTree = ElementTree.fromstring(MTreeS)
    # BUG FIX: NextOffset was only assigned when <next_offset> existed,
    # raising NameError in the paging check below on the last page.
    NextOffset = None
    NextOffsetSafe = MTree.find('next_offset')
    if NextOffsetSafe is not None:
        NextOffset = NextOffsetSafe.text
    PageIndex = MTree.find('page_index').text
    PageTotal = MTree.find('page_total').text
    Films = MTree.findall('film')
    for Film in list(Films):
        FilmID = Film.find('id').text
        Title = Film.find('title').text
        Plot = Film.find('logline').text
        Year = Film.find('year').text
        Images = Film.find('images')
        Image = Images.find('image')
        strImage = ElementTree.tostring(Image)
        # NOTE(review): fragile scrape of the poster URL -- assumes the
        # serialized attribute order/values never change.
        ImageSrc = re.compile('<image height="317" src="(.+?)" type="android_poster" width="214" />').findall(strImage)[0]
        Minutes = Film.find('duration_minutes').text
        Seconds = Film.find('duration_seconds').text
        Duration = GetFormattedTime(Minutes) + ':' + Seconds
        MPAA = Film.find('parental_rating').text
        addDir(Title, FilmID, SF_MODE_PLAY, ImageSrc, False, True, False, Plot, Year, Duration, MPAA)
    #print 'PageIndex: ' + PageIndex + ' PageTotal: ' + PageTotal + ' NextOffset: ' + NextOffset
    if PageIndex != PageTotal and NextOffset is not None:
        if mode == SF_MODE_LIST:
            addDir(__settings__.getLocalizedString(30013), url, mode, next_thumb, True, False, NextOffset)
        else:
            addDir(__settings__.getLocalizedString(30013), '', mode, next_thumb, True, False, NextOffset)
def getHitCount(self):
    """get the number of retrieved pages
    @return int the number of retrieved pages

    NOTE(review): the return type is actually mixed -- a numeric string on
    success, the literal string "error" when TSUBAKI answers with a
    non-numeric body, and False when the engine name is unrecognized.
    """
    hits = False
    if self.__engine == "tsubaki":
        url = "http://tsubaki.ixnlp.nii.ac.jp/api.cgi?query=%s&only_hitcount=1" % self.__query
        force_dpnd = self.getParameter("force_dpnd")
        if force_dpnd:
            url += "&force_dpnd=" + str(force_dpnd)
        print url
        # Prefer the local cache when available to avoid re-querying.
        if self.is_available_caching():
            res = self.runCacheFunc(url)
        else:
            res = urllib.urlopen(url).read()
        # TSUBAKI returns a bare integer body in hit-count mode.
        if not re.match("^[0-9]+$", res):
            return "error"
        hits = res.rstrip()
    elif self.__engine == "yahoo" or self.__engine == "yahoo2":
        if self.is_available_caching():
            xmlstr = self.runCacheFunc(self.getResultURI(1, 1))
            print xmlstr
            doc = ElementTree(fromstring(xmlstr))
        else:
            # NOTE(review): this branch opens getResultURI() as a local
            # *file* path, not a URL -- presumably the cache layer stores
            # responses under that name; confirm.
            url = self.getResultURI(1, 1)
            fd = file(url, "rb")
            doc = ElementTree(file=fd)
        e = doc.getroot()
        hits = e.attrib["totalResultsAvailable"]
    return hits
def configure(self):
    """Interactively obtain a Netflix OAuth access token and cache the
    authenticated user's id and display name on this instance.

    No-op when an access_token is already present.  Raises ValueError when
    either the token check or the user-detail fetch does not return 200.
    """
    if hasattr(self, 'access_token'):
        return
    h = self.http()
    h.fetch_request_token()
    url = h.authorize_token()
    # OAuth 1.0 out-of-band flow: the user authorizes in a browser, then
    # confirms here before the access token is exchanged.
    raw_input("Visit this URL to authorize your account:\n\n<%s>\n\nthen press Enter: " % url)
    self.access_token = h.fetch_access_token()
    # Confirm that the access token works.
    h = self.http(self.access_token)
    response, content = h.request("http://api.netflix.com/users/current")
    if response.status != 200:
        raise ValueError('Could not authorize Netflix account')
    # Find the real user info: /users/current answers with a <link> whose
    # href points at the concrete user resource.
    culink = ElementTree.fromstring(content)
    userlink = culink.find('link')
    userhref = userlink.get('href')
    # Look for the user name.
    response, content = h.request(userhref)
    if response.status != 200:
        raise ValueError('Could not fetch Netflix account')
    userdoc = ElementTree.fromstring(content)
    userid = userdoc.find('user_id').text
    firstname = userdoc.find('first_name').text
    lastname = userdoc.find('last_name').text
    self.userid = userid
    self.name = ' '.join((firstname, lastname))
def __init__(self, elem):
    """Wrap an XML element and expose its 'name', 'type' and 'default'
    attributes (each None when the attribute is absent)."""
    self.elem = elem
    self.name = self.elem.get('name')
    self.type = self.elem.get('type')
    self.default = self.elem.get('default')
    # Debug output (Python 2 print statements): dump the raw element and
    # the extracted attribute triple.
    print et.tostring(self.elem)
    print "!",self.name, self.type, self.default
def getHostGuestMapping(self):
    """
    Returns dictionary with host to guest mapping, e.g.:

    { 'host_id_1': ['guest1', 'guest2'],
      'host_id_2': ['guest3', 'guest4'],
    }

    Hosts without guests map to an empty list.  Guests that reference an
    unknown host are skipped with a warning; guests without a <host>
    element are ignored.
    """
    mapping = {}
    hosts_xml = ElementTree.parse(self.get(self.hosts_url))
    vms_xml = ElementTree.parse(self.get(self.vms_url))
    # Pre-seed every known host so hosts with no guests still appear.
    for host in hosts_xml.findall('host'):
        # Renamed from 'id', which shadowed the builtin of the same name.
        host_id = host.get('id')
        mapping[host_id] = []
    for vm in vms_xml.findall('vm'):
        guest_id = vm.get('id')
        host = vm.find('host')
        if host is None:
            # Guest don't have any host
            continue
        host_id = host.get('id')
        # Membership test on the dict itself instead of mapping.keys().
        if host_id not in mapping:
            self.logger.warning("Guest %s claims that it belongs to host %s which doen't exist" % (guest_id, host_id))
        else:
            mapping[host_id].append(guest_id)
    return mapping
def load_config(self, account):
    """
    @type account: L{amsn2.core.account_manager.aMSNAccount}

    Load the per-account config.xml.  Returns an aMSNConfig pre-populated
    with the protocol defaults; the defaults alone are returned when no
    configuration file exists on disk.
    """
    # Protocol defaults, returned unchanged when nothing is saved yet.
    config = aMSNConfig()
    config.set_key("ns_server", "messenger.hotmail.com")
    config.set_key("ns_port", 1863)
    path = os.path.join(self.accounts_dir,
                        self._get_dir(account.view.email),
                        "config.xml")
    try:
        handle = file(path, "r")
    except IOError:
        return config
    tree = ElementTree(file=handle)
    handle.close()
    root = tree.getroot()
    if root.tag == "aMSNConfig":
        # Each <entry> carries a name, a type tag and its value as text.
        for entry in root.findall("entry"):
            key = entry.attrib['name']
            if entry.attrib['type'] == 'int':
                config.set_key(key, int(entry.text))
            else:
                config.set_key(key, entry.text)
    return config
def main(): #define the options usage = "usage: %prog [options]" version = "%prog 0.2.6" options = [] parser = OptionParser(usage=usage, version=version, option_list=options) #parse the options (options, arguments) = parser.parse_args() first = ET.parse(arguments[0]).getroot() firstChildren = [] for child in first: children = {} for x in child: children[x.tag] = x.text firstChildren.append(children) second = ET.parse(arguments[1]).getroot() secondChildren = [] for child in second: children = {} for x in child: children[x.tag] = x.text secondChildren.append(children) differ = False for child in firstChildren: if not child in secondChildren: differ = True break if differ: print "Score files differ"
def loadAccount(self, email):
    """Load <account_dir>/account.xml and build an AccountView from it.

    Returns the populated AccountView, or None when the file lacks one of
    the mandatory elements (email, nick, psm, presence) or has the wrong
    root tag.
    """
    accview = None
    self.createAccountFileTree(email)
    accpath = os.path.join(self.account_dir, "account.xml")
    accfile = file(accpath, "r")
    root_tree = ElementTree(file=accfile)
    accfile.close()
    account = root_tree.getroot()
    if account.tag == "aMSNAccount":
        #email (mandatory)
        emailElmt = account.find("email")
        if emailElmt is None:
            return None
        accview = AccountView(self._core, emailElmt.text)
        #nick (mandatory)
        nickElmt = account.find("nick")
        if nickElmt is None:
            return None
        if nickElmt.text:
            accview.nick.appendText(nickElmt.text)
        #TODO: parse...
        #psm (mandatory)
        psmElmt = account.find("psm")
        if psmElmt is None:
            return None
        if psmElmt.text:
            accview.psm.appendText(psmElmt.text)
        #presence (mandatory)
        presenceElmt = account.find("presence")
        if presenceElmt is None:
            return None
        accview.presence = presenceElmt.text
        #password (optional)
        passwordElmt = account.find("password")
        if passwordElmt is None:
            accview.password = None
        else:
            accview.password = passwordElmt.text
        # BUG FIX: save_password/autoconnect were dereferenced with .text
        # without a None check, so an account.xml missing either element
        # crashed with AttributeError.  Missing elements now fall back to
        # the permissive defaults (True), matching the previous behavior
        # for any text other than "False".
        #save_password
        savePassElmt = account.find("save_password")
        if savePassElmt is not None and savePassElmt.text == "False":
            accview.save_password = False
        else:
            accview.save_password = True
        #autoconnect
        saveAutoConnect = account.find("autoconnect")
        if saveAutoConnect is not None and saveAutoConnect.text == "False":
            accview.autologin = False
        else:
            accview.autologin = True
        #TODO: use backend & all
        #dp
        dpElmt = account.find("dp")
        #TODO
        #TODO: preferred_ui ?
        accview.save = True
    return accview
def query(q_text='',q_fieldtext='',q_databasematch='',q_maxresults=''): delimiter = "\t" # idol server parameters query_action = "Query" query_params = "" query_valid = False if q_text != '': query_params = query_params + "&text=" + q_text query_valid = True if q_fieldtext != '': query_params = query_params + "&fieldtext=" + q_fieldtext query_valid = True if q_text != '': query_params = query_params + "&databasematch=" + q_databasematch if q_text != '': query_params = query_params + "&maxresults=" + str(q_maxresults) if query_valid == False: print "Missing query parameter: text or fieldtext are required." return autnresponse = aci.action(query_action+query_params) response = autnresponse.find("./response").text if response == "SUCCESS": numhits = int(autnresponse.find( "./responsedata/{http://schemas.autonomy.com/aci/}numhits").text) if numhits > 0: print "Hits: "+str(numhits) for hit in autnresponse.findall( "./responsedata/{http://schemas.autonomy.com/aci/}hit"): doc = hit.find("{http://schemas.autonomy.com/aci/}content/DOCUMENT") print ElementTree.tostring(doc) else: print "Query succeeded, but returned no hits: "+ElementTree.tostring(autnresponse) else: print "ACI exception: "+response+"|"+ElementTree.tostring(autnresponse) return autnresponse
def Search(self, srcr, keywords, type, list, lock, message_queue, page='', total_pages=''):
    """Search TVRage for shows matching *keywords* and register each hit
    as a 'tv_seasons' content entry via self.AddContent.

    NOTE(review): the urlencoded pair produces 'show=search_keywords=...'
    in the request URL -- presumably the TVRage endpoint tolerates this;
    confirm against the feed.
    """
    import elementtree.ElementTree as ET
    import urllib
    from entertainment.net import Net
    net = Net()
    keywords = self.CleanTextForSearch(keywords)
    import re
    from entertainment import odict
    search_dict = odict.odict({'search_keywords':keywords})
    name = urllib.urlencode(search_dict)
    id_search = 'http://services.tvrage.com/myfeeds/search.php?key=ag6txjP0RH4m0c8sZk2j&show='+name
    content = net.http_GET(id_search).content
    try:
        root = ET.fromstring(content)
    except:
        # The feed occasionally contains characters that break the parser;
        # scrub the text and retry once.
        content = common.CleanText2(content, True, True)
        root = ET.fromstring(content)
    shows = root.findall('.//show')
    for show in shows:
        show_id = show.find('showid').text
        show_url = 'http://services.tvrage.com/myfeeds/episode_list.php?key=ag6txjP0RH4m0c8sZk2j&sid='+str(show_id)
        show_name = show.find('name').text
        show_year = show.find('started').text
        self.AddContent(list, srcr, common.mode_Content, show_name, '', 'tv_seasons', url=show_url, name=show_name, year=show_year)
def merge (file, langs):
    """Merge the translated document *file* with its ID-bearing helper
    copy (kept under .helper/), write the merged result back to *file*,
    refresh the helper copy with new IDs, and optionally delete the
    translated input.  Behaviour toggles come from the module-level
    'options' collection ("no-format", "no-delete-rip").
    """
    input_helper_name = os.path.join (work_dir, os.path.dirname (file), ".helper", os.path.basename (file) )
    input_file_name = os.path.join (work_dir, file)
    helper_document = ET.parse (input_helper_name).getroot()
    translated_document = ET.parse (input_file_name).getroot()
    # Fold the translations into the helper document in place.
    merge_docs (helper_document, translated_document, langs)
    document = helper_document
    remove_ids (document)
    if not "no-format" in options:
        create_node_order_helper (document)
        format_xml (document)
    all_document = ET.ElementTree (document)
    all_document.write (file, "utf-8")
    #write copy with IDs and hash, so that when this file is
    #next splited, changed tags get marked with "changed"
    create_new_ids (document)
    all_document.write (input_helper_name, "utf-8")
    if not "no-delete-rip" in options:
        os.unlink (input_file_name)
def loadConfig(account, name):
    """Load the named configuration section for *account*.

    Only the 'General' section is supported: returns an aMSNConfig with
    protocol defaults, overlaid with any entries from the account's
    config.xml.  Returns None when the file's root tag is wrong, and
    (implicitly) None for any section name other than 'General'.
    """
    if name == 'General':
        c = aMSNConfig()
        c._config = {"ns_server":'messenger.hotmail.com',
                     "ns_port":1863,
                     }
        configpath = os.path.join(account.account_dir, "config.xml")
        try:
            configfile = file(configpath, "r")
        except IOError:
            # No saved configuration yet -- hand back the defaults.
            return c
        root_tree = ElementTree(file=configfile)
        configfile.close()
        config = root_tree.getroot()
        if config.tag == "aMSNConfig":
            lst = config.findall("entry")
            for elmt in lst:
                # Each <entry> has 'name'/'type' attributes and its value
                # as element text; only 'int' gets converted.
                if elmt.attrib['type'] == 'int':
                    c.setKey(elmt.attrib['name'], int(elmt.text))
                else:
                    c.setKey(elmt.attrib['name'], elmt.text)
            # Debug output (Python 2 print statement).
            print repr(c._config)
            return c
        else:
            return None
def parseapiVideos(response):
    """Parse an Atom/Media-RSS feed body into a list of
    [video_id, name, thumbnail_url] triples.

    response -- feed text, or False when the upstream fetch failed
    Returns the list of triples, or False when response is False.
    Relies on the module-level namespace globals 'atom' and 'media'.
    """
    if response == False:
        return False
    tree1 = ElementTree(fromstring(response))
    videos = []
    for entry in tree1.getroot().findall('{%s}entry' % atom):
        video = []
        # Video id is the last path component of the media content URL.
        m = entry.find('{%s}content' % media)
        video.append(m.get('url').split('/')[-1])
        # Description is pipe-delimited; keep the first two fields.
        name = entry.find('{%s}description' % media).text
        name = name.split('|')[0] + '-' + name.split('|')[1]
        video.append(name)
        # Pick the largest thumbnail by pixel count.
        turl = [0,'url']
        for t in entry.findall('{%s}thumbnail' % media):
            url = t.get('url')
            width = t.get('width')
            height = t.get('height')
            w = int(width)
            h = int(height)
            pixels = w*h
            if pixels > turl[0]:
                turl[0] = pixels
                turl[1] = url
        # FIX: replaced the Python-2-only '<>' operator with '!=',
        # which behaves identically and is valid in Python 3 as well.
        if turl[1] != 'url':
            video.append(turl[1])
        else:
            video.append('')
        videos.append(video)
    return videos
def shows():
    """Return a list of [url, name, thumbnail] entries for USA Network
    shows, scraped from the site's global navigation XML."""
    url = 'http://www.usanetwork.com/globalNav.xml'
    showsxml=getHTML(url)
    xml = ElementTree(fromstring(showsxml))
    shows = []
    # NOTE(review): iterates the *children* of the sixth <item> under
    # <menu> -- presumably the "Shows" submenu; confirm against the feed.
    for item in xml.getroot().findall('menu/item')[5]:
        show = []
        show.append(item.get('url'))
        name = item.get('name').title()
        show.append(name)
        # Thumbnails are hard-coded per show; unknown shows get none.
        if name == 'Burn Notice':
            thumb = 'http://www.usanetwork.com/fullepisodes/images/bn.gif'
        elif name == 'Monk':
            thumb = 'http://www.usanetwork.com/fullepisodes/images/monk.gif'
        elif name == 'Psych':
            thumb = 'http://www.usanetwork.com/fullepisodes/images/psych.gif'
        elif name == 'In Plain Sight':
            thumb = 'http://www.usanetwork.com/fullepisodes/images/ips.gif'
        else:
            thumb = ''
        show.append(thumb)
        shows.append(show)
    #Need better way to get show list missing Starter Wife and couldn't find a feed for Dr Steve-O
    #show = []
    #show.append('http://video.usanetwork.com/player/feeds/?level=743701&type=placement&showall=1')
    #show.append('Starter Wife')
    #show.append('http://www.usanetwork.com/fullepisodes/images/sw.gif')
    #shows.append('show')
    return shows
def parseapiArtists(response):
    """Parse an Atom feed body into a list of [id, title, thumbnail_url]
    triples, one per entry.

    response -- feed text, or False when the upstream fetch failed
    Returns the list of triples, or False when response is False.
    Relies on the module-level namespace globals 'atom' and 'media'.
    """
    if response == False:
        return False
    tree1 = ElementTree(fromstring(response))
    artists = []
    for entry in tree1.getroot().findall('{%s}entry' % atom):
        artist = []
        artist.append(entry.find('{%s}id' % atom).text)
        artist.append(entry.find('{%s}title' % atom).text)
        # Pick the largest thumbnail by pixel count.
        turl = [0,'url']
        for t in entry.findall('{%s}thumbnail' % media):
            url = t.get('url')
            width = t.get('width')
            height = t.get('height')
            w = int(width)
            h = int(height)
            pixels = w*h
            if pixels > turl[0]:
                turl[0] = pixels
                turl[1] = url
        # FIX: replaced the Python-2-only '<>' operator with '!=',
        # which behaves identically and is valid in Python 3 as well.
        if turl[1] != 'url':
            artist.append(turl[1])
        else:
            artist.append('')
        artists.append(artist)
    return artists
def login(self):
    """Authenticate against the Webshare API.

    With no credentials configured, succeeds immediately (free account).
    Otherwise fetches the account salt, derives the sha1(md5crypt) and
    md5 digest hashes the API expects, performs the login call and stores
    the session token on self.token.  Returns True on success, False on
    any API error or incomplete credentials.
    """
    if not self.username and not self.password:
        return True # fall back to free account
    elif self.username and self.password and len(self.username)>0 and len(self.password)>0:
        self.info('Login user=%s, pass=*****' % self.username)
        # get salt
        headers,req = self._create_request('',{'username_or_email':self.username})
        data = util.post(self._url('api/salt/'),req,headers=headers)
        xml = ET.fromstring(data)
        if not xml.find('status').text == 'OK':
            self.error('Server returned error status, response: %s' % data)
            return False
        salt = xml.find('salt').text
        # create hashes: password hash uses the server-provided salt,
        # digest is a fixed-realm md5 over the raw password.
        password = hashlib.sha1(md5crypt(self.password, salt)).hexdigest()
        digest = hashlib.md5(self.username + ':Webshare:' + self.password).hexdigest()
        # login
        headers,req = self._create_request('',{'username_or_email':self.username,'password':password,'digest':digest,'keep_logged_in':1})
        data = util.post(self._url('api/login/'),req,headers=headers)
        xml = ET.fromstring(data)
        if not xml.find('status').text == 'OK':
            self.error('Server returned error status, response: %s' % data)
            return False
        self.token = xml.find('token').text
        self.info('Login successfull')
        return True
    return False
def save(self): self.backend_manager.saveConfig(self, self.config, 'General') #TODO: integrate with personnalinfo if self.view is not None and self.view.email is not None: root_section = Element("aMSNAccount") #email emailElmt = SubElement(root_section, "email") emailElmt.text = self.view.email #nick nick = str(self.view.nick) nickElmt = SubElement(root_section, "nick") nickElmt.text = nick #presence presenceElmt = SubElement(root_section, "presence") presenceElmt.text = self.view.presence #password passwordElmt = self.backend_manager.setPassword(self.view.password, root_section) passwordElmt.text = self.view.password #dp #TODO ask the backend dpElmt = SubElement(root_section, "dp", backend='DefaultBackend') #TODO #TODO: save or not, preferred_ui #TODO: backend for config/logs/... if not os.path.isdir(self.account_dir): os.makedirs(self.account_dir, 0700) accpath = os.path.join(self.account_dir, "account.xml") xml_tree = ElementTree(root_section) xml_tree.write(accpath, encoding='utf-8')
def test_et():
    """ElementTree"""
    # Render the module-level 'table' (an iterable of dict-like rows) as
    # an HTML-style <table>; the serialized result is discarded, this is
    # a benchmark body.
    root = et.Element('table')
    for record in table:
        row_elem = et.SubElement(root, 'tr')
        for value in record.values():
            cell = et.SubElement(row_elem, 'td')
            cell.text = str(value)
    et.tostring(root)
def test_et():
    """ElementTree"""
    # Benchmark body: build a <table> element from the module-level
    # 'table' rows and serialize it, discarding the result.
    markup = et.Element("table")
    for row in table:
        row_node = et.SubElement(markup, "tr")
        for cell_value in row.values():
            et.SubElement(row_node, "td").text = str(cell_value)
    et.tostring(markup)
def getServers(self):
    """Query MyPlex for this account's registered servers.

    Returns (localServers, remoteServers, foundServer): two dicts keyed
    by machineIdentifier mapping to PlexServer objects, plus a flag that
    is True when at least one server entry was present in the response.
    Owned servers are probed on their LAN addresses first and fall back
    to the remote address when none responds with a matching identifier.
    """
    localServers = dict()
    remoteServers = dict()
    foundServer = False
    if self.isAuthenticated():
        url = MyPlexService.SERVERS_URL % self.authenticationToken
        util.logDebug("Finding servers via: "+url)
        data = util.Http().Get(url)
        if data:
            tree = ElementTree.fromstring(data)
            for child in tree:
                host = child.attrib.get("address", "")
                port = child.attrib.get("port", "")
                localAddresses = child.attrib.get("localAddresses", "")
                accessToken = child.attrib.get("accessToken", "")
                machineIdentifier = child.attrib.get("machineIdentifier", "")
                # owned == "1" marks a server belonging to this account.
                local = child.attrib.get("owned", "0")
                sourceTitle = child.attrib.get("sourceTitle", "")
                util.logInfo("MyPlex found server %s:%s" % (host,port))
                foundServer = True
                server = None
                if local == "1":
                    #Try the local addresses
                    #TODO: Similiar code exists in the server and this is a bit convoluted....
                    if localAddresses:
                        localAddresses = localAddresses.split(',')
                        util.logInfo("--> Resolving local addresses")
                        resolved = False
                        for addr in localAddresses:
                            http = util.Http()
                            util.logDebug("--> Trying local address %s:32400" % addr)
                            data = http.Get("http://"+addr+":32400/?X-Plex-Token="+accessToken)
                            if http.GetHttpResponseCode() == -1:
                                # Plain HTTP unreachable; retry over HTTPS.
                                data = http.Get("https://"+addr+":32400/?X-Plex-Token="+accessToken)
                            if data:
                                # NOTE(review): reuses/shadows the outer
                                # 'tree' variable -- safe only because the
                                # outer tree is not read again afterwards.
                                tree = ElementTree.fromstring(data)
                                localMachineIdentifier = tree.attrib.get("machineIdentifier", "")
                                # Only accept the LAN address if it is
                                # really the same machine.
                                if localMachineIdentifier == machineIdentifier:
                                    util.logInfo("--> Using local address %s:32400 instead of remote address" % addr)
                                    server = PlexServer(addr, "32400", accessToken)
                                    resolved = True
                                    break
                        if not resolved:
                            util.logInfo("--> Using remote address %s unable to resolve local address" % host)
                            server = PlexServer(host, port, accessToken)
                    if server is None or not server.isValid():
                        continue
                    localServers[machineIdentifier] = server
                else:
                    #Remote server found
                    server = PlexServer(host, port, accessToken, sourceTitle)
                    remoteServers[machineIdentifier] = server
    return localServers, remoteServers, foundServer
def pollGenerateReport (reportRequestId):
    """Poll the adCenter reporting service for the report identified by
    *reportRequestId* via a raw SOAP request.

    Uses the module-level credentials (developertoken, password, username)
    and endpoint settings (host, reportProxy).  Returns the parsed SOAP
    response Element on HTTP "OK", otherwise prints the fault details and
    returns None.
    """
    soapStr = """<?xml version="1.0" encoding="utf-8"?>
<s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/">
  <s:Header>
    <h:ApplicationToken i:nil="true" xmlns:h="https://adcenter.microsoft.com/v8" xmlns:i="http://www.w3.org/2001/XMLSchema-instance" />
    <h:CustomerAccountId i:nil="true" xmlns:h="https://adcenter.microsoft.com/v8" xmlns:i="http://www.w3.org/2001/XMLSchema-instance" />
    <h:CustomerId i:nil="true" xmlns:h="https://adcenter.microsoft.com/v8" xmlns:i="http://www.w3.org/2001/XMLSchema-instance" />
    <h:DeveloperToken xmlns:h="https://adcenter.microsoft.com/v8">%s</h:DeveloperToken>
    <h:Password xmlns:h="https://adcenter.microsoft.com/v8">%s</h:Password>
    <h:UserName xmlns:h="https://adcenter.microsoft.com/v8">%s</h:UserName>
  </s:Header>
  <s:Body>
    <PollGenerateReportRequest xmlns="https://adcenter.microsoft.com/v8">
      <ReportRequestId>%s</ReportRequestId>
    </PollGenerateReportRequest>
  </s:Body>
</s:Envelope>""" % (developertoken, password, username, reportRequestId)
    # Create the Web service client, and then add the required headers.
    _service = httplib.HTTPS(host)
    _service.putrequest("POST", reportProxy)
    _service.putheader("Accept","text/xml")
    _service.putheader("Accept","multipart/*")
    _service.putheader("Content-type", "text/xml; charset=\"UTF-8\"")
    _service.putheader("Content-length", "%d" % len(soapStr))
    _service.putheader("SOAPAction", "PollGenerateReport")
    _service.putheader("HOST", str(host))
    _service.endheaders()
    # Execute the Web service request.
    _service.send(soapStr)
    # Get the response message and results.
    statuscode, statusmessage, header = _service.getreply()
    res = _service.getfile().read()
    response = None
    if statusmessage == "OK":
        response = ET.fromstring(res)
    else:
        # The method call failed.
        print soapStr
        print "PollGenerateReport failed.\n"
        print "Status Code: ", statuscode, "\n"
        print "Header: ", header, "\n"
        print res
        faultTree = ET.fromstring(res)
        print faultTree.findtext(".//faultcode"), " ", \
            faultTree.findtext(".//faultstring")
    # NOTE(review): 'response if response else None' tests Element
    # truthiness, which is based on child count -- a childless envelope
    # would be dropped.  Presumably the envelope always has children;
    # confirm, or return 'response' directly.
    return response if response else None
def _get_elem(elem): """Assume an ETree.Element object or a string representation. Return the ETree.Element object""" if not ET.iselement(elem): try: elem = ET.fromstring(elem) except Exception: py2and3.print_("Value Error", elem) raise ValueError("Cannot convert to element") return elem
def combine_xml(files):
    """Parse each file (relative to the directory given as sys.argv[1])
    and splice the children of every subsequent root into the first one.
    Returns the serialized combined tree, or None when *files* is empty.
    """
    merged = None
    for filename in files:
        root = ElementTree.parse(os.path.join(sys.argv[1], filename)).getroot()
        if merged is None:
            # First document becomes the container for everything else.
            merged = root
        else:
            merged.extend(root)
    if merged is not None:
        return ElementTree.tostring(merged)
def _request(self, path, data=None):
    """POST (or GET when data is None) to baseURL + path and return the
    parsed XML response Element.  Element payloads are serialized first.
    """
    if isinstance(data, ET._ElementInterface):
        data = ET.tostring(data)
    url=self.baseURL + path
    req = urllib2.Request(url=url, data=data)
    returned_xml = ""
    try:
        returned_xml = self.opener.open(req,timeout=5).read()
    except:
        # NOTE(review): the bare except swallows every network error and
        # leaves returned_xml empty, so the fromstring() below raises
        # ParseError on an empty string -- the original failure cause is
        # lost.  Consider narrowing the except and re-raising.
        pass
    return ET.fromstring(returned_xml)
def read(text):
    """Build the uClassify XML request body for a classify ('read') call
    against the 'subjects' classifier and return its root Element.
    Uses the module-level API key 'readKey'.
    """
    print "Safely made it into the read() method!"
    root = ET.Element('uclassify', xmlns='http://api.uclassify.com/1/RequestSchema', version='1.01')
    texts = ET.SubElement(root, 'texts')
    textBase64 = ET.SubElement(texts, 'textBase64', id='text1')
    # NOTE(review): the element name says base64 but the text is only
    # XML-escaped here -- confirm the encoding happens elsewhere.
    textBase64.text = escape(text)
    readCalls = ET.SubElement(root, 'readCalls', readApiKey=readKey)
    classify = ET.SubElement(readCalls, 'classify', id='classify1', classifierName='subjects', textId='text1')
    # Debug output (Python 2 print statements).
    print ET.tostring(root, encoding='UTF-8')
    print "End of read() method."
    return root
def xmlvalid(self, xml, xsd):
    """Test whether an XML document is valid against an XSD Schema

    Parameters:
    * xml: XML content
    * xsd: pointer to XML Schema (local file path or URL)
    """
    # Compile the schema, then validate the in-memory document against it.
    schema_doc = etree.parse(xsd)
    schema = etree.XMLSchema(schema_doc)
    document = etree.parse(StringIO(xml))
    return schema.validate(document)
def shows():
    """Return [id, name, thumbnail_url] entries for every <series> in the
    SciFi rewind playlist feed."""
    playlist_url = 'http://www.scifi.com/rewind/playlist.xml'
    feed_text = getHTML(playlist_url)
    document = ElementTree(fromstring(feed_text))
    results = []
    for series in document.getroot().findall('series'):
        entry = [
            series.find('id').text,
            series.find('name').text,
            # Thumbnail paths in the feed are relative to the site root.
            baseurl + series.find('thumbnailUrl').text,
        ]
        results.append(entry)
    return results
def train(text, className, new=0):
    """Build the uClassify XML request body to train *className* with
    *text* on the 'subjects' classifier; when new=1 the class is created
    first.  Returns the root Element.  Uses the module-level 'writeKey'.
    """
    print "Safely made it to the train() method!"
    root = ET.Element('uclassify', xmlns='http://api.uclassify.com/1/RequestSchema', version='1.01')
    texts = ET.SubElement(root, 'texts')
    textBase64 = ET.SubElement(texts, 'textBase64', id='text1')
    # NOTE(review): the element name says base64 but the text is only
    # XML-escaped here -- confirm the encoding happens elsewhere.
    textBase64.text = escape(text)
    writeCalls = ET.SubElement(root, 'writeCalls', writeApiKey=writeKey, classifierName='subjects')
    if new == 1:
        # Create the class before training it.
        addClass = ET.SubElement(writeCalls, 'addClass', id='add1', className=className)
    train = ET.SubElement(writeCalls, 'train', id='train1', className=className, textId='text1')
    # Debug output (Python 2 print statement).
    print ET.tostring(root, encoding='UTF-8')
    return root
def parse(source, policy=u""):
    """Parse a supermodel XML schema definition into a Model.

    source -- file name/object accepted by ElementTree.parse
    policy -- name of the ISchemaPolicy utility controlling the generated
              interface's name, bases and module
    Returns a Model whose .schemata maps schema name -> generated
    InterfaceClass (fieldsets stored under FIELDSETS_KEY).
    """
    tree = ElementTree.parse(source)
    root = tree.getroot()
    model = Model()
    handlers = {}
    schema_metadata_handlers = tuple(getUtilitiesFor(ISchemaMetadataHandler))
    field_metadata_handlers = tuple(getUtilitiesFor(IFieldMetadataHandler))
    policy_util = getUtility(ISchemaPolicy, name=policy)

    def readField(fieldElement, schemaAttributes, fieldElements, baseFields):
        # Parse one <field /> element: resolve its type handler (cached in
        # 'handlers'), build the field object and record it in the two
        # accumulators.  Returns the field name.
        fieldName = fieldElement.get('name')
        fieldType = fieldElement.get('type')
        if fieldName is None or fieldType is None:
            raise ValueError("The attributes 'name' and 'type' are required for each <field /> element")
        handler = handlers.get(fieldType, None)
        if handler is None:
            handler = handlers[fieldType] = queryUtility(IFieldExportImportHandler, name=fieldType)
            if handler is None:
                raise ValueError("Field type %s specified for field %s is not supported" % (fieldType, fieldName, ))
        field = handler.read(fieldElement)
        # Preserve order from base interfaces if this field is an override
        # of a field with the same name in a base interface
        base_field = baseFields.get(fieldName, None)
        if base_field is not None:
            field.order = base_field.order
        # Save for the schema
        schemaAttributes[fieldName] = field
        fieldElements[fieldName] = fieldElement
        return fieldName

    for schema_element in root.findall(ns('schema')):
        schemaAttributes = {}
        schema_metadata = {}
        schemaName = schema_element.get('name')
        if schemaName is None:
            schemaName = u""
        bases = ()
        baseFields = {}
        # 'based-on' lists dotted names of base interfaces whose fields
        # are inherited (and may be overridden by name).
        based_on = schema_element.get('based-on')
        if based_on is not None:
            bases = tuple([resolve(dotted) for dotted in based_on.split()])
            for base_schema in bases:
                baseFields.update(getFields(base_schema))
        fieldElements = {}
        # Read global fields
        for fieldElement in schema_element.findall(ns('field')):
            readField(fieldElement, schemaAttributes, fieldElements, baseFields)
        # Read fieldsets and their fields
        fieldsets = []
        fieldsets_by_name = {}
        for subelement in schema_element:
            if subelement.tag == ns('field'):
                readField(subelement, schemaAttributes, fieldElements, baseFields)
            elif subelement.tag == ns('fieldset'):
                fieldset_name = subelement.get('name')
                if fieldset_name is None:
                    raise ValueError(u"Fieldset in schema %s has no name" % (schemaName))
                # Fieldsets may appear multiple times; reuse an existing
                # one of the same name so its field list accumulates.
                fieldset = fieldsets_by_name.get(fieldset_name, None)
                if fieldset is None:
                    fieldset_label = subelement.get('label')
                    fieldset_description = subelement.get('description')
                    fieldset = fieldsets_by_name[fieldset_name] = Fieldset(fieldset_name, label=fieldset_label, description=fieldset_description)
                    # NOTE(review): redundant re-assignment -- the line
                    # above already stored the fieldset in the dict.
                    fieldsets_by_name[fieldset_name] = fieldset
                    fieldsets.append(fieldset)
                for fieldElement in subelement.findall(ns('field')):
                    parsed_fieldName = readField(fieldElement, schemaAttributes, fieldElements, baseFields)
                    if parsed_fieldName:
                        fieldset.fields.append(parsed_fieldName)
        # Materialize the collected attributes as a zope InterfaceClass,
        # letting the policy utility choose name/bases/module.
        schema = InterfaceClass(name=policy_util.name(schemaName, tree),
                                bases=bases + policy_util.bases(schemaName, tree),
                                __module__=policy_util.module(schemaName, tree),
                                attrs=schemaAttributes)
        schema.setTaggedValue(FIELDSETS_KEY, fieldsets) # Save fieldsets
        # Let metadata handlers write metadata
        for handler_name, metadata_handler in field_metadata_handlers:
            for fieldName in schema:
                if fieldName in fieldElements:
                    metadata_handler.read(fieldElements[fieldName], schema, schema[fieldName])
        for handler_name, metadata_handler in schema_metadata_handlers:
            metadata_handler.read(schema_element, schema)
        model.schemata[schemaName] = schema
    return model
def __init__(self, fmt, overwritable=False, extra_bytes=False, extradims=[]):
    '''Build the :obj:`laspy.util.Format` instance.

    fmt         -- point format number ("0".."10"), "VLR"/"EVLR", header
                   version ("h1.0".."h1.4"), "extra_bytes_struct" or "None"
    overwritable-- whether the format's fields may be overwritten
    extra_bytes -- forwarded to build_extra_bytes()
    extradims   -- additional dimension specs appended to the format

    NOTE(review): 'extradims=[]' is a mutable default argument; all calls
    omitting it share one list -- confirm no caller mutates it.
    '''
    fmt = str(fmt)
    self.fmt = fmt
    self.overwritable = overwritable
    self.extradims = extradims
    try:
        self._etree = etree.Element("Format")
    except:
        print( "There was an error initializing the etree instance, XML and " + " Etree methods may throw exceptions.")
        self._etree = False
    self.specs = []
    self.rec_len = 0
    # struct format string, little-endian, grown as specs are added.
    self.pt_fmt_long = "<"
    self.compressed = False
    # Try to detect compression. The only values which get passed to
    # this method which are coercible to integers are point formats.
    # Try to detect point formats which are equivalent to a valid format
    # plus 128, which signifies a potential laz file.
    try:
        fmt = int(fmt)
        compression_bit_7 = (fmt & 0x80) >> 7
        compression_bit_6 = (fmt & 0x40) >> 6
        if (not compression_bit_6 and compression_bit_7):
            self.compressed = True
            fmt &= 0x3f
        fmt = str(fmt)
    except ValueError:
        # Non-numeric formats (VLR, headers, ...) pass through unchanged.
        pass
    if not (fmt in ("0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "VLR", "EVLR", "h1.0", "h1.1", "h1.2", "h1.3", "h1.4", "extra_bytes_struct", "None")):
        raise LaspyException("Invalid format: " + str(fmt))
    if self.fmt == None:
        return
    ## Point Fields
    if fmt in ([str(x) for x in range(11)]):
        self.format_type = "point format = " + fmt
        self.build_point_format(fmt)
    ## VLR Fields
    if fmt == "VLR":
        self.format_type = "VLR"
        self.build_vlr_format(fmt)
    if fmt == "EVLR":
        self.format_type = "EVLR"
        self.build_evlr_format(fmt)
    ## Header Fields
    if fmt[0] == "h":
        self.build_header(fmt)
    if fmt == "extra_bytes_struct":
        self.build_extra_bytes_struct()
    ## Shared
    self.build_extra_bytes(extra_bytes)
    self.setup_lookup()
def get_reviews_for_page(self, app_store_id, app_id, app_version, page_num):
    """ Returns a list of customer reviews on a given page for a given app.

    Args:
    app_store_id: the corresponding App Store ID number for a particular country
    app_id: unique identifier for the app from which to collect customer reviews
    app_version: version of the app for which to collect customer reviews
    page_num: current page number

    Each review is a dict with keys 'review', 'version', 'user', 'rating'
    and 'topic'; reviews not matching app_version are skipped.
    """
    # Impersonate the iTunes client; X-Apple-Store-Front selects country.
    userAgent = 'iTunes/9.2 (Macintosh; U; Mac OS X 10.6)'
    front = '%d-1' % app_store_id
    url = 'http://ax.phobos.apple.com.edgesuite.net/WebObjects/MZStore.woa/wa/viewContentsUserReviews?id=%s&pageNumber=%d&sortOrdering=4&onlyLatestVersion=false&type=Purple+Software' % ( app_id, page_num)
    request = urllib2.Request(url, headers={ 'X-Apple-Store-Front': front, 'User-Agent': userAgent })
    try:
        u = urllib2.urlopen(request, timeout=30)
    except urllib2.HTTPError:
        print 'Can\'t connect to the App Store, please try again later.'
        raise SystemExit
    root = ElementTree.parse(u).getroot()
    reviews = []
    # The store answers with iTunes UI markup; each review lives at this
    # deeply nested container path.
    for node in root.findall( '{http://www.apple.com/itms/}View/{http://www.apple.com/itms/}ScrollView/{http://www.apple.com/itms/}VBoxView/{http://www.apple.com/itms/}View/{http://www.apple.com/itms/}MatrixView/{http://www.apple.com/itms/}VBoxView/{http://www.apple.com/itms/}VBoxView/{http://www.apple.com/itms/}VBoxView/' ):
        review = {}
        review_node = node.find( '{http://www.apple.com/itms/}TextView/{http://www.apple.com/itms/}SetFontStyle' )
        if review_node is None:
            review['review'] = None
        else:
            review['review'] = review_node.text
        version_node = node.find( '{http://www.apple.com/itms/}HBoxView/{http://www.apple.com/itms/}TextView/{http://www.apple.com/itms/}SetFontStyle/{http://www.apple.com/itms/}GotoURL' )
        if version_node is None:
            review['version'] = None
        else:
            # Version string appears in the link's trailing text.
            review['version'] = re.search(r'Version ([^\n^\ ]+)', version_node.tail).group(1)
        # If the review is for a version of the app other than the current one, then skip it
        if review['version'] != app_version:
            continue
        user_node = node.find( '{http://www.apple.com/itms/}HBoxView/{http://www.apple.com/itms/}TextView/{http://www.apple.com/itms/}SetFontStyle/{http://www.apple.com/itms/}GotoURL/{http://www.apple.com/itms/}b' )
        if user_node is None:
            review['user'] = None
        else:
            review['user'] = user_node.text.strip()
        rating_node = node.find( '{http://www.apple.com/itms/}HBoxView/{http://www.apple.com/itms/}HBoxView/{http://www.apple.com/itms/}HBoxView' )
        try:
            # The star rating is encoded in the alt text, e.g. "4 stars".
            alt = rating_node.attrib['alt']
            stars = int(alt.strip(' stars'))
            review['rating'] = stars
        except KeyError:
            review['rating'] = None
        topic_node = node.find( '{http://www.apple.com/itms/}HBoxView/{http://www.apple.com/itms/}TextView/{http://www.apple.com/itms/}SetFontStyle/{http://www.apple.com/itms/}b' )
        if topic_node is None:
            review['topic'] = None
        else:
            review['topic'] = topic_node.text
        reviews.append(review)
    return reviews
def get_project(self, pageIndex):
    """
    Search Ohloh for open-source projects matching the 'search' value that
    was set in the class constructor.  *pageIndex* selects which page of
    the search results to return.

    Returns a list of per-project dicts; every dict also carries the
    shared result 'header' (item counts / first item position).
    """
    url = self.url + self.search + "&page=" + str(pageIndex)
    f = urllib.urlopen(url)
    # Parse the response into a structured XML object
    tree = ET.parse(f)
    # Did Ohloh return an error?
    elem = tree.getroot()
    error = elem.find("error")
    if error != None:
        print 'Ohloh returned:', ET.tostring(error),
        sys.exit()
    # project header
    header = dict()
    project_list = list()
    header['items_available'] = elem.find("items_available").text
    header['items_returned'] = elem.find("items_returned").text
    header['first_item_position'] = int(
        elem.find("first_item_position").text)
    # Output all the immediate child properties of an Account
    for projects in elem.findall("result/project"):
        data = dict()
        data['header'] = header
        data['id'] = projects.find("id").text
        data['name'] = projects.find("name").text
        data['homepage_url'] = projects.find("homepage_url").text
        #data['description'] = (projects.find("description").text).replace('"','')
        data['description'] = " TEST "
        # Some projects carry no logo information at all.
        if (ET.iselement(projects.find("medium_logo_url"))):
            data['medium_logo_url'] = projects.find("medium_logo_url").text
            data['small_logo_url'] = projects.find("small_logo_url").text
        else:
            data['medium_logo_url'] = "#"
            data['small_logo_url'] = "#"
        data['ohloh_url'] = "https://www.ohloh.net/p/" + data['id']
        licenses = list()
        # Multi-License parsing
        for item in projects.findall("licenses/license"):
            license = dict()
            license['name'] = item.find("name").text
            license['nice_name'] = item.find("nice_name").text
            license['license_url'] = self.license_base + item.find(
                "name").text
            licenses.append(license)
        # When there is no license information at all, mark it 'unknown'.
        if (len(licenses) == 0):
            item = dict()
            item['name'] = 'unknown'
            item['nice_name'] = 'unknown'
            item['license_url'] = '#'
            licenses.append(item)
        data['licenses'] = licenses
        project_list.append(data)
    ## Return arrays of JSON type string data
    return project_list


def save_result(search_result):
    """ do save """
    # NOTE(review): persistence is not implemented; this is a stub.
    return None
def addProductionsOf(r, cg, src, tgt): if r in processed: return for p in src.prods: if p.nt == r: tgt.addProd(p) processed.append(r) for n in cg[r]: addProductionsOf(n, cg, src, tgt) return if __name__ == "__main__": if len(sys.argv) < 4: print 'This tool extracts a portion of a grammar that starts at a given root nonterminal and includes all referenced nonterminals as well.' print 'Usage:' print ' subgrammar <bgf-input> <new-root> [<new-root>...] <bgf-output>' sys.exit(1) bgf = BGF.Grammar() newBgf = BGF.Grammar() bgf.parse(sys.argv[1]) roots = sys.argv[2:-1] print 'Setting root(s) to', roots for r in roots: newBgf.addRoot(r) cg = metrics.getCallGraph(bgf) for r in roots: addProductionsOf(r, cg, bgf, newBgf) ET.ElementTree(newBgf.getXml()).write(sys.argv[-1]) sys.exit(0)
def main():
    """Entry point of the Maestro GUI: configures logging, loads the site
    and per-user preference files, assembles the stanza search path, picks
    a default ensemble, and bootstraps the Qt GUI."""
    # --- Process command line options ---- #
    opts = process_command_line()
    # Log to a rotating file in the per-user data directory; stdout/stderr
    # are redirected into the GUI's pseudo-file sinks below.
    formatter = \
        logging.Formatter(
            '%(asctime)s %(name)-12s: %(levelname)-8s %(message)s'
        )
    user_data_dir = xplatform.getUserAppDir(const.APP_NAME)
    if not os.path.exists(user_data_dir):
        os.mkdir(user_data_dir)
    log_file = os.path.join(user_data_dir, 'maestro.log')
    flog_handler = logging.handlers.RotatingFileHandler(
        log_file, 'a', 50000, 5)
    flog_handler.setLevel(logging.DEBUG)
    flog_handler.setFormatter(formatter)
    logger = logging.getLogger('')
    logger.addHandler(flog_handler)
    logger.setLevel(logging.DEBUG)
    sys.stdout = maestro.util.PseudoFileOut(writeOut)
    sys.stderr = maestro.util.PseudoFileOut(writeErr)
    #   if os.name == 'nt':
    #      nt_elog_handler = logging.handlers.NTEventLogHandler('Maestro')
    #      nt_elog_handler.setLevel(logging.ERROR)
    #      nt_elog_handler.setFormatter(formatter)
    #      logger.addHandler(nt_elog_handler)
    # NOTE(review): the except/finally clause matching this try lies
    # beyond the end of this excerpt.
    try:
        logo_path = os.path.join(os.path.dirname(__file__), 'maestro',
                                 'gui', 'images', 'splash.png')
        # --- Bootstrap the environment --- #
        splash_map = QtGui.QPixmap(logo_path)
        #splash = QtGui.QSplashScreen(splash_map, QtCore.Qt.WindowStaysOnTopHint)
        splash = QtGui.QSplashScreen(splash_map)
        font = QtGui.QFont("Helvetica", 10, QtGui.QFont.Bold)
        splash.setFont(font)
        splash.show()
        splash.showMessage("Loading Maestro by Infiscape",
                           QtCore.Qt.AlignLeft, QtCore.Qt.white)
        app.processEvents()
        # The site configuration can be overridden via the environment.
        if os.environ.has_key('MAESTRO_CFG'):
            site_cfg_file_path = os.environ['MAESTRO_CFG']
        else:
            site_cfg_file_path = os.path.join(const.EXEC_DIR, 'maestro.xcfg')
        # All platforms use the same name for the Maestro client settings, but
        # the file comes from a platform-specific location.
        user_cfg_file_name = 'maestro.xml'
        user_data_dir = None
        user_data_dir = xplatform.getUserAppDir(const.APP_NAME)
        if user_data_dir is not None:
            if not os.path.exists(user_data_dir):
                os.makedirs(user_data_dir)
            user_cfg_file_path = os.path.join(user_data_dir,
                                              user_cfg_file_name)
        else:
            user_cfg_file_path = user_cfg_file_name
        gui_settings = gui.guiprefs.GuiPrefs()
        if not os.path.exists(user_cfg_file_path):
            try:
                gui.guiprefs.GuiPrefs.create(user_cfg_file_path, 'maestro')
            except IOError, ex:
                # NOTE(review): BUG -- a '%' operator is missing before the
                # argument tuple; as written this *calls* the string object
                # and raises TypeError instead of formatting the warning.
                QtGui.QMessageBox.warning(None, "Warning",
                    "Failed to create preferences file: %s: %s" \
                    (user_cfg_file_path, ex.strerror))
                user_cfg_file_path = None
        gui_settings.load(site_cfg_file_path, user_cfg_file_path)
        user_stanza_path = None
        # If the GUI settings include a user stanza path setting, pull it out to
        # go at the begining of the stanza search path.
        user_stanza_path = gui_settings.get('user_stanza_path', None)
        if user_stanza_path is not None:
            user_stanza_path = user_stanza_path.strip().split(os.path.pathsep)
        site_stanza_path = None
        # If the GUI settings include a site-wide stanza path setting, pull it
        # out to go after the user-specific stanza path.
        site_stanza_path = gui_settings.get('site_stanza_path', None)
        if site_stanza_path is not None:
            site_stanza_path = site_stanza_path.strip().split(os.path.pathsep)
        if user_stanza_path is None:
            user_stanza_path = []
        if site_stanza_path is None:
            site_stanza_path = []
        # Set the base stanza search path. This looks in the user-specified
        # directories first and then in the site-wide directory list.
        stanza_path = user_stanza_path + site_stanza_path
        # Default to using the built-in stanza search path (see below).
        use_builtin_stanza_path = True
        # Determine whether the use of the built-in stanza search path has been
        # disabled.
        if gui_settings.has_key('use_builtin_stanza_path'):
            value = gui_settings.get('use_builtin_stanza_path',
                                     'true').strip()
            if value.lower() == 'false' or value == '0':
                use_builtin_stanza_path = False
        if use_builtin_stanza_path:
            # The remaining ordering of stanza search paths is the same as it
            # has been since the stanza search path feature was inroduced.
            stanza_path.append(os.path.join(const.EXEC_DIR, 'stanzas'))
            stanza_path.append(
                os.path.join(xplatform.getUserAppDir(const.APP_NAME),
                             'stanzas'))
            stanza_path.append(
                os.path.join(xplatform.getSiteAppDir(const.APP_NAME),
                             'stanzas'))
        # Always check to see if the environment variable STANZA_PATH is set.
        # If it is, append its value to stanza_path.
        stanza_path_env = []
        if os.environ.has_key('STANZA_PATH'):
            stanza_path.extend(os.environ['STANZA_PATH'].split(
                os.path.pathsep))
        # Expand environment variables embedded in each search path entry.
        for i in xrange(len(stanza_path)):
            stanza_path[i] = expandEnv(stanza_path[i])
        # Finally, set the stanza path that will be used for the lifetime of
        # the Maestro GUI application.
        const.STANZA_PATH = stanza_path
        print const.STANZA_PATH
        # If no ensemble has been specified yet, check to see if the GUI
        # settings contains a default ensemble to use.
        if opts.ensemble is None and gui_settings.has_key('default_ensemble'):
            ensemble_str = gui_settings['default_ensemble']
            if ensemble_str is not None:
                default_ensemble = expandEnv(ensemble_str.strip())
                # If the default ensemble file is not an absolute path, search in
                # the current working directory and the user-specific application
                # data directory for Maestro for the ensemble file.
                if not os.path.isabs(default_ensemble):
                    # NOTE: We could define an ensemble search path.
                    dirs = ['.', xplatform.getUserAppDir(const.APP_NAME)]
                    for d in dirs:
                        test_name = os.path.join(d, default_ensemble)
                        if os.path.exists(test_name):
                            default_ensemble = test_name
                            break
                print "NOTE: Using default ensemble %s" % default_ensemble
                opts.ensemble = default_ensemble
            else:
                print "NOTE: No ensemble (default or otherwise) has been specified."

        def splashProgressCB(percent, message):
            # Splash progress callback; percent is a fraction in [0, 1].
            splash.showMessage("%3.0f%% %s" % (percent * 100, message),
                               QtCore.Qt.AlignLeft, QtCore.Qt.white)
            app.processEvents()

        env = maestro.gui.Environment()
        env.initialize(gui_settings, opts, splashProgressCB)
        # Close the splash screen.
        splash.finish(None)
        # Create and display GUI
        m = gui.Maestro.Maestro()
        m.init()
        element_tree = None
        if opts.ensemble is not None:
            try:
                print "Loading ensemble '%s' ..." % opts.ensemble
                # Parse XML file.
                element_tree = ET.ElementTree(file=opts.ensemble)
                m.setEnsemble(
                    ensemble.Ensemble(element_tree, fileName=opts.ensemble))
                print "Ensemble loaded."
            except IOError, ex:
                element_tree = None
                QtGui.QMessageBox.critical(None, "Error",
                    "Failed to read ensemble file %s: %s" % \
                    (opts.ensemble, ex.strerror))
def __init__(self, debug=False, foampng=False): self.foampng = foampng self.debug = debug self.plane_cnt = 0 self.maximise = True # false = smaller is better self.tree = ET.parse('planes/mig21.vsp') self.root = self.tree.getroot() self.plane = {} self.ranges = {} self.xml = {} self.parts = ['wingsection2', 'vertsection1', 'horzsection1'] self.ranges['wingsection2'] = { 'Span': (1.5, 5.0), 'TC': (0.0, 2.5), 'RC': (2.5, 5.0), 'Sweep': (-10.0, 70.0), 'Dihedral': (-10.0, 10, 0) } self.ranges['vertsection1'] = { 'Span': (1.5, 2.0), 'Sweep': (0.0, 60.0) } self.ranges['horzsection1'] = { 'Span': (1.0, 2.0), 'Sweep': (0.0, 60.0), 'Dihedral': (-20.0, 20, 0) } wing, vertstab, horzstab = None, None, None for elem in self.root.getiterator(): if elem.tag == 'Component': if not elem.find("General_Parms") == None: name = elem.find("General_Parms").find("Name") if name.text == "Wing": wing = elem if name.text == "Vertical Stabilizer": vertstab = elem if name.text == "Horizontal Stabilizer": horzstab = elem sect_cnt = 0 for elem in wing.getiterator(): if elem.tag == "Section": name = 'wingsection' + str(sect_cnt) self.xml[name] = elem sect_cnt += 1 sect_cnt = 0 for elem in horzstab.getiterator(): if elem.tag == "Section": name = 'horzsection' + str(sect_cnt) self.xml[name] = elem sect_cnt += 1 sect_cnt = 0 for elem in vertstab.getiterator(): if elem.tag == "Section": name = 'vertsection' + str(sect_cnt) self.xml[name] = elem sect_cnt += 1
def parse_description_xml(addon):
    """Parse <addon>/description.xml and return the collected metadata.

    Returns the function's locals() -- i.e. a dict of every metadata field
    extracted below -- or None when anything fails (the bare except at the
    bottom prints the traceback and the function falls off the end).
    """
    try:
        # load and parse description.xml file
        feed = open(os.path.join(addon, "description.xml"))
        tree = ET.parse(feed)
        feed.close()
        elems = tree.getroot()
        # required tags
        # (required) guuid: unique identifier of this addon guuids can be generated online at sites such as http://www.famkruithof.net/uuid/uuidgen
        guid = elems.findtext("guid")
        #(required) type:
        type = {
            1: "visualization",
            2: "skin",
            3: "pvrdll",
            4: "script",
            5: "scraper",
            6: "screensaver",
            7: "plugin-pvr",
            8: "plugin-video",
            9: "plugin-music",
            10: "plugin-program",
            11: "plugin-pictures",
            12: "plugin-weather"
        }.get(int(elems.findtext("type")), "unknown").title()
        # (required) Title
        title = elems.findtext("title")
        # (required) Major.minor.build
        version = elems.findtext("version")
        # (required) author name & email. at least one author name is required
        authors = [elem.attrib for elem in elems.find("authors")]
        # (required) Short description of addon.
        summary = elems.findtext("summary")
        # optional tags
        # Longer description of addon. Usage instructions should go here if required.
        try:
            description = elems.findtext("description")
        except:
            description = ""
        # user defined tags
        try:
            tags = " / ".join([elem.text for elem in elems.find("tags")])
        except:
            tags = ""
        # minimum revision of xbmc your addon will run on. Leave blank all for revisions
        try:
            minrevision = elems.findtext("minrevision")
        except:
            minrevision = ""
        # patforms compatible with your addon. xbox, osx, windows, linux, or all
        try:
            platforms = " / ".join(
                [elem.text for elem in elems.find("platforms")])
        except:
            platforms = ""
        # list any dependancies (such as another addon, your addon may have. minversion and maxversion are optional
        try:
            dependencies = [(elem.attrib, elem.text)
                            for elem in elems.find("dependencies")]
        except:
            dependencies = []
        # (optional) Whatever is put in disclaimer will be shown before download in an ok/cancel dialog. Keep it short and to the point.
        try:
            disclaimer = elems.findtext("disclaimer")
        except:
            disclaimer = ""
        # (optional) The License the addon is released under.
        try:
            license = elems.findtext("license")
        except:
            license = ""
        # Drop the parsing temporaries so locals() carries only metadata.
        # NOTE(review): 'elem' exists here only because Python 2 list
        # comprehensions leak their loop variable; on Python 3 this 'del'
        # would raise NameError.
        del feed, tree, elems, elem
        #for key in locals().keys():
        #    print key
        return locals()
    except:
        # NOTE(review): bare except -- swallows every error and implicitly
        # returns None after printing the traceback.
        print_exc()
def handle(self, *args, **options):
    """Import movies from an XML catalog export given as args[0].

    For each <Movie> element this creates a Movie row, links director /
    country / genre relations, and tracks per-field import success so the
    summary line can report imported vs. skipped fields.
    """
    # load xml file
    try:
        xml_doc = et.parse(args[0])
    except IndexError:
        self.stdout.write('No file given\n')
        return
    except IOError:
        self.stdout.write("Could not open file: %s" % args[0])
        return
    # add movies
    movies = xml_doc.findall("//Movie")
    for i, movie in enumerate(movies):
        a = movie.attrib
        # keep track of imported fields
        fields = {}
        new_movie = Movie()
        try:
            new_movie.id = int(a["Number"])
            new_movie.title = a["OriginalTitle"].strip()
        except KeyError:
            self.stdout.write(
                "Panic! Could not extract Number nor OriginalTitle." +
                "Skipping title: %s\n" % a)
            continue
        new_movie.save()  # or relations cannot be assigned
        # if we can extract imdb id we leave most other fields
        # empty that can be filled by imdb
        # BUGFIX: 'url' must be reset for every movie.  Previously, when
        # a["URL"] raised KeyError, 'url' was either undefined (NameError
        # on first iteration) or stale from the previous movie.
        url = None
        try:
            url = a["URL"]
            new_movie.imdb_id = parse_imdb(url)
            fields['imdb_id'] = True
        except (KeyError, AttributeError):
            # if imdb id is not present we need to copy other fields
            fields['imdb_id'] = False
            if url and len(url) > 2:
                new_movie.notes = "URL: %s\n" % url.strip()
                fields['notes'] = True
        # director
        try:
            director_name = a["Director"].strip()
            try:
                p = Person.objects.get(name=director_name)
            except Person.DoesNotExist:
                # ok we have to fill imdb person ourselves in some cases
                if director_name == 'David Lynch':
                    imdb_id = 186
                elif director_name == 'Carsten Walter':
                    imdb_id = None
                elif director_name == 'Roger Sommer':
                    imdb_id = None
                elif director_name == 'Dieter Rhode':
                    imdb_id = None
                else:
                    raise Exception(
                        "Panic! Manually assign imdb id for person " +
                        "'%s' (%s)\n" % (director_name, new_movie.title))
                p = Person(imdb_id=imdb_id, name=director_name)
                p.save()
            new_movie.directors.add(p)
            fields['directors'] = True
        except KeyError:
            fields['directors'] = False
        # country
        try:
            country_name = a["Country"].strip()
            c, created = Country.objects.get_or_create(name=country_name)
            c.save()
            new_movie.countries.add(c)
            fields['countries'] = True
        except KeyError:
            fields['countries'] = False
        # category
        try:
            genre_name = a["Category"].strip()
            g, created = Genre.objects.get_or_create(name=genre_name)
            g.save()
            new_movie.genres.add(g)
            fields['genres'] = True
        except KeyError:
            fields['genres'] = False
        # year
        try:
            new_movie.year = int(a["Year"].strip())
            fields['year'] = True
        except (KeyError, ValueError):
            fields['year'] = False
        # runtime
        try:
            new_movie.runtime = int(a["Length"].strip())
            fields['runtime'] = True
        except (KeyError, ValueError):
            fields['runtime'] = False
        # plot (description)
        try:
            new_movie.plot = a["Description"].strip()
            fields['plot'] = True
        except (KeyError, ValueError):
            fields['plot'] = False
        # always import non-imdb fields
        # seen (checked)
        try:
            checked = a["Checked"]
            if checked == 'True':
                seen = True
            elif checked == 'False':
                seen = False
            else:
                raise ValueError()
            new_movie.seen = seen
            fields['seen'] = True
        except (KeyError, ValueError):
            fields['seen'] = False
        # date added
        try:
            new_movie.added_on = datetime.strptime(a["Date"], '%m/%d/%Y')
            fields['added_on'] = True
        except (KeyError, ValueError):
            fields['added_on'] = False
        # finally save movie
        new_movie.save()
        # log import
        imported = ' '.join([f for f in fields.keys() if fields[f]])
        not_imported = ' '.join([('-%s' % f) for f in fields.keys()
                                 if not fields[f]])
        self.stdout.write("Imported '%s' (%s %s)\n" %
                          (new_movie.title, imported, not_imported))
def __init__(self, xmlFile): self.tree = et.parse(xmlFile) self.root = self.tree.getroot()
ser = serial.Serial(device, baud, timeout=timeout) rec = wits0(ser) while (1): try: print "Requesting Data" rec.write(globals.PASON_DATA_REQUEST) time.sleep(1) data = rec.read() if len(data) > 0: log = et.fromstring('<log/>') file_name = os.path.join(os.getcwd(), 'wits_log.xml') print "Logging to:", file_name log_file = open(file_name, 'a') print "Data Received" d = data[0] ts = datetime.datetime.now() print ts ts_element = et.Element('timestamp') ts_element.text = str(ts)
def format(file): all_document = ET.parse(file) document = all_document.getroot() create_node_order_helper(document) format_xml(document) all_document.write(file, "utf-8")
def process_command_line(): """ Parse and process the command line options. @returns Dictionary of the found options. """ prog_desc = """ Maestro by Infiscape """ parser = OptionParser(usage="%prog [ options ... ] [ensemble] [stanza]", version=maestro.__version__, description=prog_desc) parser.add_option("-e", "--ensemble", type="string", help="the ensemble to load") parser.add_option("-s", "--stanza", action="append", type="string", help="load the named stanza (multiple allowed)", dest="stanzas") parser.add_option("-l", "--launch-only", type="string", metavar="APP_NAME", help="allow launching of only the named application") # NOTE: Ensuring that the file named using this argument is an absolute # path is not done until the lookup in the stanza store is actually # performed. The idea is to keep the detail of using absolute paths # encapsulated inside the StanzaStore class. parser.add_option( "-L", "--launch-all-from", type="string", metavar="STANZA_FILE", help= "allow launching of only the applications in the identified stanza file" ) parser.add_option("-v", "--view", type="string", help="display the identified view when the GUI opens") parser.add_option("-o", "--override", action="append", type="string", help="override stanza settings", dest="overrides") (opts, pos_args) = parser.parse_args() # For easy use from application launchers where it is not always # convenient to specify command line parameters. if len(pos_args) > 0: if opts.stanzas is None: opts.stanzas = [] for a in pos_args: try: # Positional arguments are supposed to be XML files. We determine # what type of file it is by loading it and looking at the type of # the root element. tree = ET.ElementTree(file=a) type = tree.getroot().tag if 'ensemble' == type: opts.ensemble = a elif 'stanza' == type: opts.stanzas.append(a) except IOError, ex: pass
import pickle import sys def getPosName(e): pos = e.attrib["pos"] if ":" in e.attrib["name"]: m = re.match("(^.*):.*$", e.attrib["name"]) name = m.group(1) else: name = e.attrib["name"] return (pos, name) ipLex = sys.argv[1] ipTree = et.parse(ipLex) ipRoot = ipTree.getroot() op_dir = path(sys.argv[2]) opRoot = et.Element("ccg-lexicon") for attr in ipRoot.attrib: opRoot.attrib[attr] = ipRoot.attrib[attr] opRoot.text = "\n" memD = {} for i in ipRoot: if i.tag == "family": sp = getPosName(i) if sp not in memD: memD[sp] = []
def parseXML(inFileName): doc = etree.parse(inFileName) root = doc.getroot() walk_tree(root, 0, '')
def xml(self): '''Return an XML Formatted string, describing all of the :obj:`laspy.util.Spec` objects belonging to the Format.''' return (etree.tostring(self._etree))
source.text = jobobj.sourceUrl address = SubElement(job, 'adresse') ad = "<br>".join(jobobj.adresse) address.text = ad.encode("utf-8").decode("utf-8") remote = SubElement(job, 'remoteUrl') remote.text = jobobj.remoteUrl contact = SubElement(job, 'contact') contact.text = jobobj.contact email = SubElement(job, 'email') email.text = jobobj.email phone = SubElement(job, 'phone') phone.text = jobobj.phone #TODO: LOGO ! logo_field = jobobj.getField('logo') mimetype = logo_field.getContentType(jobobj) if mimetype.startswith('image/'): logo = SubElement(job, 'logo', mimetype=mimetype, filename=logo_field.getFilename(jobobj)) raw_image = logo_field.get(jobobj, raw=True).aq_inner if isinstance(raw_image.data, str): logo.text = base64.b64encode(raw_image.data) else: logo.text = base64.b64encode(raw_image.data.data) output_file = open('jobs.xml', 'w') output_file.write('<?xml version="1.0"?>') output_file.write(ElementTree.tostring(afpyjobs)) output_file.close()
def writeKML(earth, counties, party):
    # Emit a KML file of Michigan county primary results for one party.
    # 'earth' selects the Google Earth flavour (per-county icons and a
    # tilted camera) versus the flat map flavour; the flag also selects
    # the output filename prefix ('earth' vs 'maps').
    print 'Writing ' + party
    kml = ET.Element('kml', {'xmlns': 'http://earth.google.com/kml/2.0'})
    kmlDocument = ET.SubElement(kml, 'Document')
    # Initial camera position over Michigan.
    kmlDocumentLookAt = ET.SubElement(kmlDocument, 'LookAt')
    kmlDocumentLookAtLatitude = ET.SubElement(kmlDocumentLookAt, 'latitude')
    kmlDocumentLookAtLatitude.text = '43.5'
    kmlDocumentLookAtLongitude = ET.SubElement(kmlDocumentLookAt, 'longitude')
    kmlDocumentLookAtLongitude.text = '-71.7'
    kmlDocumentLookAtRange = ET.SubElement(kmlDocumentLookAt, 'range')
    kmlDocumentLookAtRange.text = '200000'
    kmlDocumentLookAtTilt = ET.SubElement(kmlDocumentLookAt, 'tilt')
    kmlDocumentLookAtTilt.text = '55'
    kmlDocumentName = ET.SubElement(kmlDocument, 'name')
    kmlDocumentName.text = 'Michigan ' + partyName(party) + ' Primary'
    kmlFolder = ET.SubElement(kmlDocument, 'Folder')
    kmlFolderName = ET.SubElement(kmlFolder, 'name')
    kmlFolderName.text = 'Michigan Counties'
    # One Placemark per county: its boundary polygon plus, for Earth
    # output, a point carrying the leader's icon.
    for name, county in counties.iteritems():
        kmlPlacemark = ET.SubElement(kmlFolder, 'Placemark')
        #kmlPlaceName = ET.SubElement( kmlPlacemark, 'name' )
        #kmlPlaceName.text = name
        kmlMultiGeometry = ET.SubElement(kmlPlacemark, 'MultiGeometry')
        if earth:
            kmlPoint = ET.SubElement(kmlMultiGeometry, 'Point')
            kmlPointCoordinates = ET.SubElement(kmlPoint, 'coordinates')
            kmlPointCoordinates.text = coord(county['centroid'])
        kmlPolygon = ET.SubElement(kmlMultiGeometry, 'Polygon')
        kmlOuterBoundaryIs = ET.SubElement(kmlPolygon, 'outerBoundaryIs')
        kmlLinearRing = ET.SubElement(kmlOuterBoundaryIs, 'LinearRing')
        kmlCoordinates = ET.SubElement(kmlLinearRing, 'coordinates')
        kmlCoordinates.text = ' '.join(
            [coord(point) for point in county['points']])
        kmlStyle = ET.SubElement(kmlPlacemark, 'Style')
        if earth:
            # Candidate icon for the county leader ('generic' when none).
            kmlIconStyle = ET.SubElement(kmlStyle, 'IconStyle')
            kmlIcon = ET.SubElement(kmlIconStyle, 'Icon')
            kmlIconHref = ET.SubElement(kmlIcon, 'href')
            leader = getLeader(county, party) or {'name': 'generic'}
            kmlIconHref.text = iconBaseUrl + leader['name'] + '-border.png'
        kmlBalloonStyle = ET.SubElement(kmlStyle, 'BalloonStyle')
        kmlBalloonText = ET.SubElement(kmlBalloonStyle, 'text')
        kmlBalloonText.text = htmlBalloon(county, party)
        kmlLineStyle = ET.SubElement(kmlStyle, 'LineStyle')
        kmlLineStyleColor = ET.SubElement(kmlLineStyle, 'color')
        kmlLineStyleColor.text = '40000000'
        kmlLineStyleWidth = ET.SubElement(kmlLineStyle, 'width')
        kmlLineStyleWidth.text = '1'
        kmlPolyStyle = ET.SubElement(kmlStyle, 'PolyStyle')
        kmlPolyStyleColor = ET.SubElement(kmlPolyStyle, 'color')
        kmlPolyStyleColor.text = getColor(county, party)
    kmlTree = ET.ElementTree(kml)
    kmlfile = open(
        private.targetKML + ['maps', 'earth'][earth] + '-mi-' + party +
        '.kml', 'w')
    kmlfile.write('<?xml version="1.0" encoding="utf-8" ?>\n')
    kmlTree.write(kmlfile)
    kmlfile.close()
vars.append(o) if vars: v = ' '.join([term.n3() for term in vars]) else: v = '*' query = "SELECT %s WHERE { %s %s %s }" % \ (v, s.n3(), p.n3(), o.n3()) self.resetQuery() if self.context_aware and context is not None: self.addDefaultGraph(context.identifier) self.setQuery(query) doc = ElementTree.parse(SPARQLWrapper.query(self).response) # ElementTree.dump(doc) for rt, vars in TraverseSPARQLResultDOM(doc, asDictionary=True): yield (rt.get(s, s), rt.get(p, p), rt.get(o, o)), None def triples_choices(self, (subject, predicate, object_), context=None): """ A variant of triples that can take a list of terms instead of a single term in any slot. Stores can implement this to optimize the response time from the import default 'fallback' implementation, which will iterate over each term in the list and dispatch to triples. """ raise NotImplementedError('Triples choices currently not supported') def __len__(self, context=None):
def configure(self, config_file):
    """Configure simulated partitions.

    Arguments:
    config_file -- xml configuration file (root element must be <BG>;
                   each <Block> becomes one idle partition)
    """

    def _get_node_card(name):
        # Memoized NodeCard lookup so equal ids share a single object.
        if not self.node_card_cache.has_key(name):
            self.node_card_cache[name] = NodeCard(name)
        return self.node_card_cache[name]

    self.logger.info("configure()")
    try:
        system_doc = ElementTree.parse(config_file)
    except IOError:
        self.logger.error("unable to open file: %r" % config_file)
        self.logger.error("exiting...")
        sys.exit(1)
    except:
        self.logger.error("problem loading data from file: %r" %
                          config_file)
        self.logger.error("exiting...")
        traceback.print_exc(file=sys.stdout)
        sys.exit(1)
    system_def = system_doc.getroot()
    if system_def.tag != "BG":
        self.logger.error("unexpected root element in %r: %r" %
                          (config_file, system_def.tag))
        self.logger.error("exiting...")
        sys.exit(1)
    # that 32 is not really constant -- it needs to either be read from cobalt.conf or from the bridge API
    NODES_PER_NODECARD = 32
    # initialize a new partition dict with all partitions
    #
    partitions = PartitionDict()
    tmp_list = []
    part_key = {}
    # this is going to hold partition objects from the bridge (not our own Partition)
    wiring_cache = {}
    bp_cache = {}
    for partition_def in system_def.getiterator("Block"):
        # skip duplicated partition
        if part_key.has_key(partition_def.get("name")):
            continue
        else:
            part_key[partition_def.get("name")] = ""
        node_list = []
        switch_list = []
        for nc in partition_def.getiterator("NodeBoard"):
            node_list.append(_get_node_card(nc.get("id")))
        nc_count = len(node_list)
        # skip 1K mesh partition if its a default partition file
        if config_file == "partition_xml/2013-10-10-mira_pure.xml":
            if NODES_PER_NODECARD * nc_count == 1024 and self.check_mesh(
                    partition_def.get("name")):
                continue
        # Group partition names by node-card count for the wiring-dependency
        # pass below.
        if not wiring_cache.has_key(nc_count):
            wiring_cache[nc_count] = []
        wiring_cache[nc_count].append(partition_def.get("name"))
        for s in partition_def.getiterator("Switch"):
            switch_list.append(s.get("id"))
        tmp_list.append(
            dict(
                name=partition_def.get("name"),
                queue=partition_def.get("queue", "default"),
                size=NODES_PER_NODECARD * nc_count,
                node_cards=node_list,
                switches=switch_list,
                state="idle",
            ))
    partitions.q_add(tmp_list)
    # find the wiring deps
    # Same-size partitions that share any switch conflict with each other.
    for size in wiring_cache:
        for p in wiring_cache[size]:
            p = partitions[p]
            s1 = sets.Set(p.switches)
            for other in wiring_cache[size]:
                other = partitions[other]
                if (p.name == other.name):
                    continue
                s2 = sets.Set(other.switches)
                if s1.intersection(s2):
                    self.logger.info(
                        "found a wiring dep between %s and %s", p.name,
                        other.name)
                    partitions[p.name]._wiring_conflicts.add(other.name)
    # update object state
    self._partitions.clear()
    self._partitions.update(partitions)
    print "Total partitions: ", len(self._partitions)
def parse_xml(fname): """Returns a parsed xml tree""" tree = ElementTree.parse(fname) root = tree.getroot() ElementInclude.include(root) return tree
def parse_xml_string(xml_string): tree = ElementTree.fromstring(xml_string) return tree
def readConfiguration(cfg):
    # Parse the LCF configuration file *cfg* and populate the module-level
    # registries (shortcuts, actions, autoactions, testsets, sources,
    # targets, tools, ...), then print a one-line summary.
    config = ElementTree.parse(cfg)
    # shortcuts
    for xmlnode in config.findall('//shortcut'):
        shortcuts[xmlnode.findtext('name')] = expandxml(
            xmlnode.findall('expansion')[0], {})
    # actions
    for xmlnode in config.findall('//target/branch/*/perform'):
        if xmlnode.text not in actions:
            actions.append(xmlnode.text)
    # automated actions
    for xmlnode in config.findall('//target/branch/*/automated'):
        if xmlnode.findtext('result') not in actions:
            actions.append(xmlnode.findtext('result'))
        autoactions[xmlnode.findtext('result')] = xmlnode.findtext(
            'method')
    # testset
    for xmlnode in config.findall('//testset'):
        testsets[xmlnode.findtext('name')] = expandxml(
            xmlnode.findall('command')[0], {})
    # sources
    for xmlnode in config.findall('//source'):
        orderedsrc.append(xmlnode.findtext('name'))
        if xmlnode.findall('derived'):
            derived[xmlnode.findtext('name')] = (
                xmlnode.findtext('derived/from'),
                xmlnode.findtext('derived/using'))
        extractor[xmlnode.findtext('name')] = expandxml(
            xmlnode.findall('grammar/extraction')[0], {})
        if xmlnode.findall('grammar/parsing'):
            parser[xmlnode.findtext('name')] = expandxml(
                xmlnode.findall('grammar/parsing')[0], {})
        if xmlnode.findall('grammar/evaluation'):
            evaluator[xmlnode.findtext('name')] = expandxml(
                xmlnode.findall('grammar/evaluation')[0], {})
        if xmlnode.findall('tree/extraction'):
            treeExtractor[xmlnode.findtext('name')] = expandxml(
                xmlnode.findall('tree/extraction')[0], {})
        if xmlnode.findall('tree/evaluation'):
            treeEvaluator[xmlnode.findtext('name')] = expandxml(
                xmlnode.findall('tree/evaluation')[0], {})
        tmp = []
        for theset in xmlnode.findall('testing/set'):
            tmp.append(theset.text)
        tester[xmlnode.findtext('name')] = tmp[:]
    # targets
    for xmlnode in config.findall('//target'):
        name = xmlnode.findtext('name')
        targets[name] = []
        for br in xmlnode.findall('branch'):
            # A branch starts with its <input> phase and chains the
            # performed / automated steps of every subsequent phase.
            for phase in br.findall('*'):
                if phase.tag == 'input':
                    branch = Chain(br.findtext('input'))
                else:
                    for p in phase.findall('*'):
                        if p.tag == 'perform':
                            branch.append(p.text)
                            ttype[p.text] = phase.tag
                        elif p.tag == 'automated':
                            branch.append(p.findtext('result'))
                            ttype[p.findtext('result')] = phase.tag
                        else:
                            print '[WARN] Unknown tag skipped:', p.tag
            targets[name].append(branch)
    # tools & methods
    for xmlnode in config.findall('//tools/*'):
        #print 'Processing tool',xmlnode.tag
        if xmlnode.tag == 'generator':
            automethods[xmlnode.findtext('name')] = expandxml(
                xmlnode.findall('command')[0], {})
        else:
            tools[xmlnode.tag] = expandxml(xmlnode.findall('grammar')[0], {})
            if xmlnode.findall('tree'):
                treeTools[xmlnode.tag] = expandxml(
                    xmlnode.findall('tree')[0], {})
            if xmlnode.findall('extension'):
                tools['extension'] = xmlnode.findtext('extension')
    # Summarise what was read (trailing commas keep it on a single line).
    print 'Read',
    if shortcuts:
        print len(shortcuts), 'shortcuts,',
    if tools or treeTools:
        print `len(tools)` + '+' + `len(treeTools)`, 'tools,',
    if actions:
        if autoactions:
            print len(
                actions), 'actions (' + `len(autoactions)`, 'automated),',
        else:
            print len(actions), 'actions,',
    if automethods:
        print len(automethods), 'generators,',
    if targets:
        print len(targets), 'targets,',
    if testsets:
        print len(testsets), 'test sets,',
    if extractor:
        print len(extractor), 'sources,',
    if parser or evaluator:
        print len(parser), 'parsers &', len(evaluator), 'evaluators,',
    print 'LCF is fine.'
#!/usr/bin/python import os import sys import slpsXPath import slpsns import elementtree.ElementTree as ET names = [] if __name__ == "__main__": if len(sys.argv) != 4: print 'This tool generates an overview of a bunch of BGF sources and targets.' print 'Usage:' print ' bgfover <lcf> <bgfs-path> <xpath>' sys.exit(1) lcf = ET.parse(sys.argv[1]) for x in lcf.findall(sys.argv[3]): name = x.findtext('name') names.append(name) path = sys.argv[2] if path[-1]!='/': path += '/' print '''\\begin{tabular}{l|c|c|c|c} &\\numberOfProductions &\\numberOfNonterminals &\\numberOfTops &\\numberOfBottoms \\\\\\hline\\hline ''' for x in names: print '\\emph{'+x+'}&'+slpsXPath.runxpath(path+x+'.bgf',slpsXPath.productions)+'&'+slpsXPath.runxpath(path+x+'.bgf',slpsXPath.nonterminals)+'&'+slpsXPath.runxpath(path+x+'.bgf',slpsXPath.top)+'&'+slpsXPath.runxpath2(path+x+'.bgf',slpsXPath.bottom)+'\\\\\\hline'
class ArchiveItem:
    """A single archive.org submission: metadata plus a list of files.

    The metadata is ultimately serialized as XML of the form::

        <metadata>
          <collection>opensource_movies</collection>
          <mediatype>movies</mediatype>
          <title>My Home Movie</title>
          <runtime>2:30</runtime>
          <director>Joe Producer</director>
        </metadata>
    """

    # ***
    # old constructor signature:
    #
    #def __init__(self, uploader, identifier, collection, mediatype,
    #             title, runtime=None, adder=None, license=None):
    # ***
    def __init__(self, uploader, license=None):
        """Initialize the submission; uploader should be an instance
        of UploadApplication."""
        # Files attached to this submission (ArchiveFile instances).
        self.files = []
        self.uploader = uploader
        # Name-mangled; only settable through the `identifier` property below.
        self.__identifier = None
        self.collection = None
        self.mediatype = None
        self.title = None
        # Free-form metadata; always carries a 'licenseurl' entry (may be None).
        self.metadata = {}
        self.metadata['licenseurl'] = license
        self.archive_url = None

    def __setitem__(self, key, value):
        """Store a metadata value; 'subjects' is split on commas into a
        list stored under the 'subject' key."""
        if key == 'subjects':
            subjects = [n.strip() for n in value.split(',')]
            self.metadata['subject'] = subjects
        else:
            self.metadata[key] = value

    def __getitem__(self, key):
        """Return the raw metadata value for key (KeyError if absent)."""
        return self.metadata[key]

    def __getIdentifier(self):
        """Return the current IA identifier for the submission, or None
        if an identifier has not been successfully set."""
        return self.__identifier

    def __setIdentifier(self, identifier):
        """Validate and store the identifier.

        Accepts the identifier only if it both conforms to naming rules
        and is still available; otherwise raises an exception."""
        if pyarchive.identifier.conforms(identifier) and \
           pyarchive.identifier.available(identifier):
            self.__identifier = identifier
            return True
        # Non-conforming or taken identifier: reject.
        raise Exception()

    # Public read/write access to the private identifier with validation.
    identifier = property(__getIdentifier, __setIdentifier)

    def addFile(self, filename, source, format=None, claim=None):
        """Attach a file to the submission and return the new ArchiveFile."""
        self.files.append(ArchiveFile(filename, source, format, claim))
        # set the running time to defaults
        if 'runtime' in self.metadata:
            self.files[-1].runtime = self.metadata['runtime']
        # return the added file object
        return self.files[-1]

    def metaxml(self, username=None):
        """Generates _meta.xml to use in submission;
        returns a file-like object."""
        # define a convenience handle to XML escape routine
        xe = xml.sax.saxutils.escape
        meta_out = StringIO.StringIO()
        # Wrap the buffer so unicode writes are encoded as UTF-8.
        result = codecs.getwriter('UTF-8')(meta_out)
        result.write('<metadata>')
        # write the required keys; note mediatype is emitted twice
        # (<mediatype> and <resource>).
        result.write( u"""
<identifier>%s</identifier>
<title>%s</title>
<collection>%s</collection>
<mediatype>%s</mediatype>
<resource>%s</resource>
<upload_application appid="%s" version="%s" />
""" % (self.identifier, xe(self.title), self.collection,
       self.mediatype, self.mediatype,
       self.uploader.application, self.uploader.version))
        if username is not None:
            result.write(u"<uploader>%s</uploader>\n" % username)
        # write any additional metadata
        for key in self.metadata:
            if self.metadata[key] is not None:
                value = self.metadata[key]
                # check if value is a list
                if type(value) in [types.ListType, types.TupleType]:
                    # this is a sequence: one element per item
                    for n in value:
                        result.write(u'<%s>%s</%s>\n' % (key, xe(str(n)), key))
                else:
                    result.write(u'<%s>%s</%s>\n' % (key, xe(str(value)), key))
        result.write(u'</metadata>\n')
        # Rewind both the writer and the underlying buffer before returning.
        result.seek(0)
        meta_out.seek(0)
        return meta_out

    def filesxml(self):
        """Generates _files.xml to use in submission;
        returns a file-like object."""
        result = StringIO.StringIO()
        result.write('<files>\n')
        # Each ArchiveFile knows how to render its own <file> node.
        for archivefile in self.files:
            result.write(archivefile.fileNode())
        result.write('</files>\n')
        result.seek(0)
        return result

    def sanityCheck(self):
        """Perform sanity checks before submitting to archive.org;
        raises MissingParameterException on the first missing field."""
        # check for required fields
        if self.identifier is None:
            raise MissingParameterException("No identifier specified.")
        if self.collection is None:
            raise MissingParameterException("No collection specified.")
        if self.mediatype is None:
            raise MissingParameterException("No mediatype specified.")
        if self.metadata['licenseurl'] is None:
            raise MissingParameterException("No licenseurl specified.")
        # check that files were specified
        if len(self.files) < 1:
            raise MissingParameterException("No files selected.")
        # perform sanity checks for each file
        for archivefile in self.files:
            archivefile.sanityCheck()

    def createSubmission(self, username, identifier):
        """Create a new submission at archive.org.  If successful returns
        a tuple containing (server, path)."""
        retry_count = 0
        new_url = "/create.php"
        headers = {
            "Content-type": "application/x-www-form-urlencoded",
            "Accept": "text/plain",
            "User-Agent": self.uploader.user_agent
        }
        # xml=1 asks create.php for a machine-readable response.
        params = urllib.urlencode({
            'xml': 1,
            'user': username,
            'identifier': identifier
        })
        conn = httplib.HTTPConnection('www.archive.org')
        conn.request('POST', new_url, params, headers)
        # Retry reading the response up to MAX_RETRY times on socket/HTTP
        # errors, with a one-second pause between attempts.
        while retry_count < MAX_RETRY:
            try:
                resp = conn.getresponse()
                response = resp.read()
                # successfully read
                break
            except (socket.error, httplib.HTTPException), e:
                # increment the retry count
                retry_count = retry_count + 1
                # short delay to prevent hammering server...
                time.sleep(1)
        # make sure we were successful
        if retry_count == MAX_RETRY:
            # unsuccessful
            raise exceptions.CommunicationsError(
                "Unable to create submission.")
        # parse the response
        try:
            result = etree.fromstring(response)
        except xml.parsers.expat.ExpatError, e:
            # XML format error occurred... raise our own Exception
            raise SubmissionError("Invalid response format.", response)
        # NOTE(review): the visible portion of this method ends here; the
        # (server, path) extraction promised by the docstring presumably
        # follows in the original file — confirm against full source.
def __main__():
    """Rename locally-mirrored genome files/directories to UCSC build names.

    Walks a base directory of per-organism data, matches each organism's
    chromosomes against the UCSC archaea DAS server, then renames files,
    rewrites their contents, and moves the organism directory to the UCSC
    build name.  Python 2 script entry point; heavy filesystem side effects.
    """
    # Base directory defaults to ./bacteria unless given as argv[1].
    base_dir = os.path.join(os.getcwd(), "bacteria")
    try:
        base_dir = sys.argv[1]
    except:
        print "using default base_dir:", base_dir
    # organisms: key -> {'chrs': {chromosome -> info-dict}, 'base_dir': ..., plus
    # flat info fields}.  Keyed by 'genome project id' when present, else 'organism'.
    organisms = {}
    for result in os.walk(base_dir):
        this_base_dir, sub_dirs, files = result
        for file in files:
            if file[-5:] == ".info":
                # Parse key=value lines; values may themselves contain '='.
                dict = {}
                info_file = open(os.path.join(this_base_dir, file), 'r')
                info = info_file.readlines()
                info_file.close()
                for line in info:
                    fields = line.replace("\n", "").split("=")
                    dict[fields[0]] = "=".join(fields[1:])
                if 'genome project id' in dict.keys():
                    # Organism-level .info file: merge all fields in.
                    if dict['genome project id'] not in organisms.keys():
                        organisms[dict['genome project id']] = {
                            'chrs': {},
                            'base_dir': this_base_dir
                        }
                    for key in dict.keys():
                        organisms[dict['genome project id']][key] = dict[key]
                else:
                    # Chromosome-level .info file: file is stored per-chromosome.
                    if dict['organism'] not in organisms.keys():
                        organisms[dict['organism']] = {
                            'chrs': {},
                            'base_dir': this_base_dir
                        }
                    organisms[dict['organism']]['chrs'][
                        dict['chromosome']] = dict
    ##get UCSC data
    URL = "http://archaea.ucsc.edu/cgi-bin/das/dsn"
    try:
        page = urllib.urlopen(URL)
    except:
        print "#Unable to open " + URL
        print "?\tunspecified (?)"
        sys.exit(1)
    text = page.read()
    try:
        tree = ElementTree.fromstring(text)
    except:
        print "#Invalid xml passed back from " + URL
        print "?\tunspecified (?)"
        sys.exit(1)
    # builds: org key -> {'build': ucsc build id, 'chrs': {refseq -> ucsc chr name}}
    builds = {}
    #print "#Harvested from http://archaea.ucsc.edu/cgi-bin/das/dsn"
    #print "?\tunspecified (?)"
    for dsn in tree:
        build = dsn.find("SOURCE").attrib['id']
        # Screen-scrape the hgGateway page for this build's chromosome table.
        try:
            org_page = urllib.urlopen(
                "http://archaea.ucsc.edu/cgi-bin/hgGateway?db=" +
                build).read().replace(
                    "\n", "").split("<table border=2 cellspacing=2 cellpadding=2>"
                                    )[1].split("</table>")[0].split("</tr>")
        except:
            print "NO CHROMS FOR", build
            continue
        # Drop the header row and a possible trailing empty fragment.
        org_page.pop(0)
        if org_page[-1] == "":
            org_page.pop(-1)
        for row in org_page:
            # First cell: UCSC chromosome name; second-to-last anchor: refseq id.
            chr = row.split("</a>")[0].split(">")[-1]
            refseq = row.split("</a>")[-2].split(">")[-1]
            # Match the refseq against every known organism chromosome.
            for org in organisms:
                for org_chr in organisms[org]['chrs']:
                    if organisms[org]['chrs'][org_chr]['chromosome'] == refseq:
                        if org not in builds:
                            builds[org] = {'chrs': {}, 'build': build}
                        builds[org]['chrs'][refseq] = chr
                        #print build,org,chr,refseq
    print
    # Only files with these extensions get their *contents* rewritten.
    ext_to_edit = [
        'bed',
        'info',
    ]
    for org in builds:
        print org, "changed to", builds[org]['build']
        #org info file
        info_file_old = os.path.join(base_dir + org, org + ".info")
        info_file_new = os.path.join(base_dir + org,
                                     builds[org]['build'] + ".info")
        # NOTE(review): base_dir + org concatenates without a path separator —
        # presumably base_dir always ends with '/'; confirm against callers.
        old_dir = base_dir + org
        new_dir = base_dir + builds[org]['build']
        #open and edit org info file
        info_file_contents = open(info_file_old).read()
        info_file_contents = info_file_contents + "build=" + builds[org][
            'build'] + "\n"
        for chrom in builds[org]['chrs']:
            # Replace every occurrence of the refseq id with the UCSC chr name.
            info_file_contents = info_file_contents.replace(
                chrom, builds[org]['chrs'][chrom])
            for result in os.walk(base_dir + org):
                this_base_dir, sub_dirs, files = result
                for file in files:
                    if file[0:len(chrom)] == chrom:
                        #rename file: swap the refseq prefix for the UCSC name
                        old_name = os.path.join(this_base_dir, file)
                        new_name = os.path.join(
                            this_base_dir,
                            builds[org]['chrs'][chrom] + file[len(chrom):])
                        move(old_name, new_name)
                        #edit contents of file, skipping those not in list
                        if file.split(".")[-1] not in ext_to_edit:
                            continue
                        file_contents = open(new_name).read()
                        file_contents = file_contents.replace(
                            chrom, builds[org]['chrs'][chrom])
                        #special case fixes...
                        if file[-5:] == ".info":
                            file_contents = file_contents.replace(
                                "organism=" + org,
                                "organism=" + builds[org]['build'])
                            # Undo the blanket replace for the refseq= field,
                            # which must keep the original refseq id.
                            file_contents = file_contents.replace(
                                "refseq=" + builds[org]['chrs'][chrom],
                                "refseq=" + chrom)
                        #write out new file
                        file_out = open(new_name, 'w')
                        file_out.write(file_contents)
                        file_out.close()
        #write out org info file and remove old file
        org_info_out = open(info_file_new, 'w')
        org_info_out.write(info_file_contents)
        org_info_out.close()
        os.unlink(info_file_old)
        #change org directory name
        move(old_dir, new_dir)
def processWordDef (lineIn):
    """Render one dictionary entry line as an HTML page with audio support.

    Parses the XHTML payload in the line, injects pronunciation (and,
    for "YD" entries, read-through) <audio> elements plus a playsound
    script, inlines images as base64 data URIs, rewrites cross-reference
    links, and writes the result to wwwDir/<dict>/<word>.html.

    Relies on module globals getWords, dictDir, wordFormTable and wwwDir.
    allwords layout (from getWords): presumably [0]=entry id, [1]=word,
    [2]=dictionary code, [3]=raw XHTML bytes — TODO confirm with getWords.
    """
    allwords = getWords (lineIn)
    # Sniff the payload's encoding, then normalise it to UTF-8 before parsing;
    # undecodable bytes are replaced rather than raising.
    encoding = chardet.detect(allwords[3])['encoding']
    root = ET.fromstring (allwords[3].decode(encoding, 'replace').encode('utf-8'))
    #add audio reference for the pronunciation clip
    bodyElem = root.find("body")
    pron_word = str('/dictionary/Pronunciation/' + 'pron_' + allwords[1] + '.mp3').encode('utf-8')
    print pron_word
    audioElem=ET.Element ('audio', id = 'dicAudio')
    audioElem.set('src', pron_word)
    audioElem.set('autoplay', 'true')
    bodyElem.append(audioElem)
    scriptElem=ET.Element('script')
    if allwords[2] == "YD":
        # "YD" entries: clicking anywhere in the body plays both the
        # pronunciation and the read-through audio.
        bodyElem.set("onclick", "playsound()")
        soundscript = "function playsound(){document.getElementById('dicAudio').play();document.getElementById('readThru').play();}"
        scriptElem.text = soundscript
        #add readThrough audio element (visible controls, autoplay)
        readthru_file = str('/dictionary/ReadthroughAudio/' + 'fd_' + allwords[0] + '.mp3').encode('utf-8')
        print readthru_file
        readThru_audioElem=ET.Element ('audio', id = 'readThru')
        readThru_audioElem.set('src', readthru_file)
        readThru_audioElem.set('controls', 'true')
        readThru_audioElem.set('autoplay', 'true')
        bodyElem.append(readThru_audioElem)
    else:
        # Other dictionaries: wire playsound() to the first pron-icon image.
        addon_script = "document.querySelectorAll('img.pron-icon')[0].setAttribute ('onclick', 'playsound()');function playsound(){document.getElementById('dicAudio').play();}"
        scriptElem.text = addon_script
    bodyElem.append(scriptElem)
    #process images: inline each local image file as a base64 data URI
    for imgElement in root.findall ('.//img'):
        imgSrc = imgElement.get('src')
        imgFile = dictDir + 'Images/' + imgSrc
        if (os.path.exists(imgFile)):
            newImgSrc = "data:image/jpg;base64," + base64.encodestring(open(imgFile,"rb").read())
        else:
            #missing image file: keep the original src untouched
            newImgSrc=imgSrc
        #print newImgSrc
        imgElement.set('src', newImgSrc)
    #change cross reference links to local HTML pages
    for xrefElement in root.findall ('.//span'):
        if xrefElement.get('class') == "xref":
            xlinkElem = xrefElement.find ('a')
            xlinkRef = xlinkElem.get('href')
            # Map the raw href through wordFormTable (entry[2] -> entry[1]).
            for entry in wordFormTable:
                if xlinkRef == entry[2]:
                    #assign the real word form
                    xlinkRef = entry[1]
            newXlinkRef = '/dictionary/' + allwords[2] + '/' + xlinkRef + '.html'
            xlinkElem.set('href', newXlinkRef)
    # Serialize the modified tree to wwwDir/<dict-code>/<word>.html.
    tree = ET.ElementTree(root)
    tree.write (wwwDir+allwords[2]+'/'+ allwords[1] + '.html', encoding='utf-8')
def xml(self):
    """Serialize this object's element tree (via self.etree()) to a string."""
    tree = self.etree()
    return etree.tostring(tree)
# mandatory - activity # mandatory - party # Validate data source uri if not options.inputRIFCS_XML: parser.error("Requires inputRIFCS_XML. Try --help for usage") os.sys.exit(-1) if len(options.inputRIFCS_XML) < 1: parser.error("Requires inputRIFCS_XML. Try --help for usage") os.sys.exit(-1) input_XML = options.inputRIFCS_XML tree = ET.parse(input_XML) root = tree.getroot() noRelatedObjectFoundFile = openFile("summary_no_related_object_found.txt", "a") unencodeableTextFoundFile = openFile("summary_unencodeable_text_found.txt", "a") noRelatedObjectKeys = list( ) # contains all keys for which no related object could be found in the current feed # Find all registry objects namespace = "{http://ands.org.au/standards/rif-cs/registryObjects}" registryObjects = root.findall('.//{0}registryObject'.format(namespace)) print("%d registryObjects found" % len(registryObjects)) extractToFile(input_XML, "party")
def get_info(videoID): """ retrouve les informations de l'ID de video fourni """ print "get_videos called for video ID: %d" % videoID #print "get_info - Retrieving page:" + videoplayerURL +"getVideosLiees/%d"%videoID elems = ET.parse( urllib.urlopen(videoplayerURL + "getVideosLiees/%d" % videoID)).getroot() type = 'rtmp' #print elems #print elems.tag #print ET.tostring(elems) #print elems.find( "VIDEO" ) for videoinfos in elems.findall("VIDEO"): videoinfos_id = int(videoinfos.findtext("ID")) videoinfos_type = videoinfos.findtext("TYPE") videoinfos_title = videoinfos.findtext( "INFOS/TITRAGE/TITRE") #.encode("cp1252") videoinfos_description = videoinfos.findtext( "INFOS/DESCRIPTION") #.encode("cp1252") videoinfos_videoHD = videoinfos.findtext("MEDIA/VIDEOS/HAUT_DEBIT") videoinfos_videoBD = videoinfos.findtext("MEDIA/VIDEOS/BAS_DEBIT") videoinfos_videoHLS = videoinfos.findtext("MEDIA/VIDEOS/HLS") videoinfos_videoHDS = videoinfos.findtext("MEDIA/VIDEOS/HDS") videoinfos_videoMOBILE = videoinfos.findtext("MEDIA/VIDEOS/MOBILE") videoinfos_publication_date = videoinfos.findtext( "INFOS/PUBLICATION/DATE") videoinfos_image = videoinfos.findtext("MEDIA/IMAGES/GRAND") videoinfos_smallimage = videoinfos.findtext("MEDIA/IMAGES/PETIT") videoinfos_categorie = videoinfos.findtext("RUBRIQUAGE/CATEGORIE") if videoinfos_id == videoID: # Found print "Video ID found" if videoinfos_type == "VOD PROG": type = 'http' videoHD_URL = videoinfos_videoHD videoBD_URL = videoinfos_videoBD else: type = 'rtmp' videoHD_URL = videoinfos_videoHD.replace( "rtmp://vod-fms.canalplus.fr", "rtmp://vod-fms.canalplus.fr:1935").replace(".flv", "") videoBD_URL = videoinfos_videoBD.replace( "rtmp://vod-fms.canalplus.fr", "rtmp://vod-fms.canalplus.fr:1935").replace(".flv", "") #print "videoHD_URL = %s"%videoHD_URL #print "videoBD_URL = %s"%videoBD_URL return { 'title': videoinfos_title, 'summary': videoinfos_description, 'publication_date': videoinfos_publication_date, 'image.url': videoinfos_image, 
'smallimage.url': videoinfos_smallimage, 'theme': videoinfos_categorie, 'video.stream_server': "", 'video.hi': videoHD_URL, 'video.low': videoBD_URL, 'video.mobile': videoinfos_videoMOBILE, 'video.hds': videoinfos_videoHDS, 'video.hls': videoinfos_videoHLS, } print "get_info - video info not FOUND - ERROR" return None