def _parseXML(activity, local, server):
    """Look up *activity* in the ``xml/pai.xml`` file and return its fields.

    Parameters:
        activity: activity code matched against each entry's ``cod_pai``.
        local, server: passed through to ``_getPath`` to resolve the XML URL.

    Returns:
        dict of the entry's child-element names to parsed values, with
        ``cod_pai`` and ``arquivo`` removed and ``quantidade`` coerced to int.

    Raises:
        Exception: if no entry with a matching ``cod_pai`` is found.
    """
    def nodeText(node):
        # Concatenate all text children, then coerce the result:
        # '1'/'0' -> bool, u'Nível N' -> N (int), anything else -> raw text.
        text = ''
        for child in node.childNodes:
            # Use == here: nodeType is a plain int; identity (`is`) only
            # happened to work because CPython caches small ints.
            if child.nodeType == child.TEXT_NODE:
                text += child.data
        if text == '1':
            return True
        elif text == '0':
            return False
        elif text.startswith(u'Nível'):
            return int(text.split(' ')[-1])
        else:
            return text

    url = _getPath('xml/pai.xml', local, server)
    paiXML = domparse(urlopen(url))
    nodes = paiXML.documentElement
    # Keep only element children; minidom also yields whitespace text nodes.
    OITiposPAI = [node for node in nodes.childNodes
                  if node.nodeType == paiXML.ELEMENT_NODE]
    for tipo in OITiposPAI:
        elements = [node for node in tipo.childNodes
                    if node.nodeType == paiXML.ELEMENT_NODE]
        d = dict((str(x.nodeName), nodeText(x)) for x in elements)
        codPAI = d.pop('cod_pai')
        if codPAI == activity:
            # 'arquivo' is dropped from the returned entry; 'quantidade'
            # is always numeric in the source file.
            d.pop('arquivo')
            d['quantidade'] = int(d['quantidade'])
            return d
    raise Exception('Atividade %s não encontrada no arquivo pai.xml.' % activity.upper())
def _plan(self, timeslice: datetime) -> Sequence[DbTimetableStop]:
    """Fetch the planned timetable slice for this station at *timeslice*'s hour.

    Parameters:
        timeslice: datetime whose two-digit year, month, day and hour select
            the ``/plan/{eva}/{yymmdd}/{hh}`` API resource.

    Returns:
        A list of ``DbTimetableStop`` objects, one per ``<s>`` element in the
        response document.
    """
    # Build the URL once; the original duplicated the identical format
    # expression for the log call and the request, risking divergence.
    url = '{}/plan/{}/{:02}{:02}{:02}/{:02}'.format(
        self.apiurl, self.eva_id,
        timeslice.year % 100, timeslice.month, timeslice.day, timeslice.hour)
    _log.debug(url)
    with urlopen(Request(url, headers=self.headers)) as u:
        d = domparse(u)
        return [DbTimetableStop.from_domnode(s) for s in d.getElementsByTagName('s')]
def get_flickerphoto(photoid):
    """Fetch one page of photos from a Flickr photoset via the REST API.

    NOTE(review): *photoid* is never used; the request is driven entirely by
    the module-level names ``pset``, ``per_page``, ``page`` and ``APIKEY`` —
    confirm this is intentional.

    Returns:
        A list of dicts, one per ``<photo>`` element, mapping local keys to
        the corresponding node attributes (URLs for the various sizes).
    """
    from urllib2 import urlopen
    from xml.dom.minidom import parse as domparse
    apiurl = 'http://api.flickr.com/services/rest/?method=flickr.photosets.getPhotos&api_key=%(apikey)s&photoset_id=%(pset)s&privacy_filter=1&per_page=%(per_page)s&page=%(page)s&extras=url_t,url_m,url_o,url_sq,url_s'
    dom = domparse(urlopen(apiurl % dict(pset=pset, per_page=per_page, page=page, apikey=APIKEY)))
    # Comprehension instead of the original append loop (same output).
    return [
        {
            'id': node.getAttribute('id'),
            'title': node.getAttribute('title'),
            'thumb': node.getAttribute('url_t'),
            'small': node.getAttribute('url_s'),
            'medio': node.getAttribute('url_m'),
            'original': node.getAttribute('url_o'),
            'square': node.getAttribute('url_sq'),
        }
        for node in dom.getElementsByTagName('photo')
    ]
def get_flickerphoto(photoid):
    """Retrieve the photos of a Flickr photoset page through the REST API.

    Builds the request from module-level query values, parses the XML
    response, and collects the size URLs of every ``<photo>`` element.

    Returns:
        A list of dicts, one per photo, keyed by local field names.
    """
    from urllib2 import urlopen
    from xml.dom.minidom import parse as domparse
    apiurl = 'http://api.flickr.com/services/rest/?method=flickr.photosets.getPhotos&api_key=%(apikey)s&photoset_id=%(pset)s&privacy_filter=1&per_page=%(per_page)s&page=%(page)s&extras=url_t,url_m,url_o,url_sq,url_s'
    query = dict(pset=pset, per_page=per_page, page=page, apikey=APIKEY)
    dom = domparse(urlopen(apiurl % query))
    # Map each output key to the XML attribute it is read from.
    fields = (
        ('id', 'id'),
        ('title', 'title'),
        ('thumb', 'url_t'),
        ('small', 'url_s'),
        ('medio', 'url_m'),
        ('original', 'url_o'),
        ('square', 'url_sq'),
    )
    photos = []
    for node in dom.getElementsByTagName('photo'):
        entry = {}
        for key, attr in fields:
            entry[key] = node.getAttribute(attr)
        photos.append(entry)
    return photos
def rchg(self) -> Sequence[DbTimetableStop]:
    """Fetch the recent-changes feed for this station.

    Returns:
        A list of ``DbTimetableStop`` objects, one per ``<s>`` element in the
        ``/rchg/{eva}`` response document.
    """
    # Build the URL once; the original repeated the identical format
    # expression for the log call and the request.
    url = '{}/rchg/{}'.format(self.apiurl, self.eva_id)
    _log.debug(url)
    with urlopen(Request(url, headers=self.headers)) as u:
        d = domparse(u)
        return [DbTimetableStop.from_domnode(s) for s in d.getElementsByTagName('s')]