def torinfo_movie(rssurl, movie):
    try:
        try:
            xml = parse(urllib.urlopen(rssurl)).getroot()
        except:
            # fall back to the DVD feed when the 720p feed fails
            rssurl = rssurl.replace('%20720p', 'dvd')
            xml = parse(urllib.urlopen(rssurl)).getroot()
        hreflist = xml.getiterator('enclosure')
        titlelist = xml.getiterator('title')
        hrefret = hreflist[0].attrib['url']
        title = titlelist[1].text.lower()
        html = urllib.urlopen(hrefret).read()
        soup = BeautifulSoup(html)
        for link in soup.find_all('a'):
            if link.has_key('href') and link.has_key('title'):
                if link['title'] == "Magnet link":
                    magnet = link['href']
                    # wrap the magnet URI in a minimal bencoded dictionary
                    magnet = "d10:magnet-uri%s:%se" % (len(magnet), magnet)
                    return (hrefret, title, magnet)
    except BaseException:
        # 'searcher' was undefined in the original handler; report the feed URL instead
        print rssurl
    return ("no-url", "no-title", "no-file")
def __init__(self, textxml="quran-uthmani.xml", propxml="quran-uthmani.xml"):
    """Init the API based on XML files.

    @param textxml: the path of the Quran text XML file
    @param propxml: the path of the properties XML file
    """
    self.mushaf = parse(textxml).getroot()
    self.info = parse(propxml).getroot()
def is_xhtml_asn(page):
    from elementtree.ElementTree import parse
    import xml.parsers.expat as expat
    from StringIO import StringIO
    try:
        parse(StringIO(page))
        return True
    except expat.ExpatError, ex:
        print "XHTML problem:\n", ex
        return False
def __init__(self, thesaurus_file):
    self.__tree = parse(thesaurus_file)
    self.all_languages = set(
        x.get('language') for x in self.__tree.findall('.//%slangstring' % NS))
    self.update()
def get_boris_data():
    url = 'http://www.tfl.gov.uk/tfl/businessandpartners/syndication/[email protected]&feedId=12'
    xmlfile = urllib.urlopen(url)
    doc = parse(xmlfile).getroot()
    # Child order: 0 id, 1 name, 2 terminalName, 3 lat, 4 long, 5 installed,
    # 6 installDate, 7 removalDate, 8 temporary, 9 nbBikes, 10 nbEmptyDocks, 11 nbDocks
    for element in doc:
        data = {
            'id': int(element[0].text),
            'name': element[1].text,
            'terminalName': element[2].text,
            'lat': float(element[3].text),
            'lng': float(element[4].text),
            'nbBikes': element[9].text,
            'nbEmptyDocks': int(element[10].text),
            'nbDocks': int(element[11].text),
        }
        yield data
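# USAGE (a sketch, not from the original source; assumes the TfL feed above is
# still reachable and returns the documented child order):
# for station in get_boris_data():
#     print station['name'], station['nbBikes'], 'bikes,', station['nbDocks'], 'docks'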
def parse_stats(self, xmlfile):
    data = {}
    stats = parse(xmlfile).getroot()
    schema = re.sub(r'^\s+|\s+$', '', stats.find('schema').text)
    schema = re.sub(r'\s+', '_', schema)
    for entry in stats.findall('solr-info/CORE/entry'):
        for stat in entry.findall('stats/stat'):
            if stat.attrib['name'] in ['numDocs', 'maxDoc']:
                attr = stat.attrib['name']
                val = re.sub(r'^\s+|\s+$', '', stat.text)
                data['%s_%s' % (schema, attr)] = val
    for entry in stats.findall('solr-info/QUERYHANDLER/entry'):
        name = re.sub(r'^\s+|\s+$', '', entry.find('name').text)
        name = re.sub(r'^org\.apache\.solr\.handler\.', '', name)
        name = re.sub(r'^/', '', name)
        name = re.sub(r'/', '_', name)
        if name != 'standard':
            continue
        entry_data = {}
        for stat in entry.findall('stats/stat'):
            if stat.attrib['name'] in ['requests', 'errors', 'timeouts',
                                       'avgTimePerRequest', 'avgRequestsPerSecond']:
                attr = stat.attrib['name']
                val = re.sub(r'^\s+|\s+$', '', stat.text)
                entry_data['%s_%s' % (schema, attr)] = val
        data.update(entry_data)
    return data
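# USAGE (a sketch, not from the original source; 'stats_handler' and
# 'solr_stats.xml' are hypothetical stand-ins for an instance of the class
# above and a saved copy of Solr's stats page):
# stats = stats_handler.parse_stats(open('solr_stats.xml'))
# for key in sorted(stats):
#     print key, '=', stats[key]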
def _insertTranslations():
    xml = file(sys.argv[1])
    vdex = parse(xml)
    root = vdex.getroot()
    terms = {}
    keys = {}
    for term in root.findall(".//%sterm" % NS):
        terms[term[0].text.lower()] = term
    for translation_filename in sys.argv[2:]:
        language = translation_filename[27:29].lower()
        xls = get_xls(translation_filename)
        keys[language] = []
        for i in range(1, xls.nrows):
            row = xls.row(i)
            id = row[0].value.strip().lower()  # original had a redundant double .lower()
            keys[language].append(id)
            translation = row[3].value.strip()
            assert translation
            term = terms[id]
            found = False
            for language_term in term.findall("./%scaption/%slangstring" % (NS, NS)):
                if language_term.attrib["language"] == language:
                    language_term.text = translation
                    if found:
                        assert False, "TWICE???"
                    found = True
            assert found
    ref_ids = keys.values()[0]
    for ids in keys.values():
        assert ref_ids == ids
    vdex.write("new.vdex", "utf-8")
def parse_manifest(url):
    """Given a url (http: or file:), get the manifest file and parse it."""
    global logfile
    file = urllib.urlopen(url)  # file.getcode() was not added until Python 2.6
    rss = parse(file).getroot()
    if rss.tag != 'SmoothStreamingMedia':
        raise Error(ErrorCodes.MissingRootTag,
                    "root tag is " + rss.tag + " expecting SmoothStreamingMedia")
    major = rss.get('MajorVersion')
    minor = rss.get('MinorVersion')
    if int(major) != 2 or int(minor) != 1:
        print >> logfile, "warning: unexpected manifest version: major=", major, "minor=", minor
    streams = []
    for element in rss.findall('StreamIndex'):
        qualities = []
        durations = []
        manifest_output = element.get('ManifestOutput')
        if manifest_output is not None:
            # the spec says this value is case-insensitive
            manifest_output = manifest_output.lower()
        data = {
            'Type': element.get('Type'),
            'Url': element.get('Url'),
            'ManifestOutput': manifest_output,
            'Qualities': qualities,
            'Durations': durations,
        }
        for quality in element.findall('QualityLevel'):
            cattrs = []
            for custom in quality.findall('CustomAttributes'):
                for attr in custom.findall('Attribute'):
                    cattrs.append((attr.get("Name"), attr.get("Value")))
            qualities.append({'Bitrate': quality.get('Bitrate'), 'Cattrs': cattrs})
        # note: the duration attr might not be there for non-audio/video
        # streams (i.e. text streams, name=GSIS)
        for duration in element.findall('c'):
            dur_value = '0'
            dur_attr = duration.get('d')
            if dur_attr is not None:
                dur_value = dur_attr
            durations.append({'Duration': dur_value, 'start': duration.get('t')})
        streams.append(data)
    return streams
def RemoverCredential(self, Aluno):
    try:
        tree = parse("/var/lib/jenkins/credentials.xml")
        elem = tree.getroot()
        domain = elem.find("domainCredentialsMap")
        entry = domain.find("entry")
        Permissions = entry.find("java.util.concurrent.CopyOnWriteArrayList")
        chaveUsuario = Permissions.findall(
            "com.cloudbees.jenkins.plugins.sshcredentials.impl.BasicSSHUserPrivateKey")
        encontrou = 0
        for c in chaveUsuario:
            if encontrou == 1:
                break
            busca = c.findall("username")
            for b in busca:
                if Aluno in b.text:
                    Permissions.remove(c)
                    log.info("[+] Credential for student %s removed", Aluno)
                    encontrou = 1
                    break
        if encontrou == 0:
            log.warning("[!] Credential not found")
        tree.write("/var/lib/jenkins/credentials.xml")
    except Exception as e:
        log.error("[-] Error removing the credential %s", e)
def main():
    with open('{}/{}'.format(SCRIPT_DIR, XCCDF_FILE), 'r') as f:
        tree = parse(f)
    if not os.path.isdir(NOTE_OUTPUT_DIR):
        os.makedirs(NOTE_OUTPUT_DIR)
    for group_element in tree.findall(".//{}Group".format(XCCDF_NAMESPACE)):
        rule_element = group_element.find("{}Rule".format(XCCDF_NAMESPACE))
        # Build a dictionary with all of our rule data.
        rule = {
            'id': group_element.attrib['id'],
            'version': rule_element.find("{}version".format(XCCDF_NAMESPACE)).text,
            'title': rule_element.find("{}title".format(XCCDF_NAMESPACE)).text,
        }
        file_path = "{}/{}.rst".format(NOTE_OUTPUT_DIR, rule['id'])
        # Skip this rule if the file already exists
        if os.path.isfile(file_path):
            continue
        front_matter = "---\nid: {0}\nstig_id: {1}\nstatus: {2}\ntag: {3}\n---\n".format(
            rule['id'], rule['version'], "none", "misc")
        body_content = "\n{}\n".format(rule['title'])
        with open(file_path, 'wb') as f:
            f.write(front_matter.encode('utf-8'))
            f.write(body_content.encode('utf-8'))
def get_tags_all(path, topics):
    nodes = []
    tagdict = {}
    for topic in topics:
        targetpath = path + "xml/" + topic + ".xml"
        targetXML = open(targetpath, 'r')
        tree = parse(targetXML)
        root = tree.getroot()
        for neighbor in root.getiterator('photo'):
            photoid = neighbor.attrib.get('id')  # add node keyed by photo id
            tags = neighbor.attrib.get('tags')
            tagvec = tags.split()
            tagdict[photoid] = tagvec
            nodes.append(photoid)
    return tagdict, nodes
def __call__(self):
    file = open(os.path.join(os.path.dirname(__file__), 'atom.xml'))
    et = parse(file)
    file.close()
    alsoProvides(et, IAtomFeedSkeleton)
    return et
def AdicionarUsuario(self, Aluno):
    """
    Method that creates the user in Jenkins. Jenkins is currently bound to
    the LDAP of the EAD system, so even if the user exists in Jenkins it
    will not be able to authenticate unless it is also registered in LDAP.
    To register the student, one more line is added to
    /var/lib/jenkins/config.xml inside the <permission> tags, with
    read-only permission.

    :param Aluno: a string containing only the student's e-mail.
    :returns: This method has no return value.
    """
    try:
        tree = parse("/var/lib/jenkins/config.xml")
        elem = tree.getroot()
        perm = elem.find('authorizationStrategy')
        busca = perm.findall("permission")
        for b in busca:
            if Aluno in b.text:
                log.warning("[!] User already registered in Jenkins")
                return
        user = Element("permission")
        user.text = "hudson.model.Hudson.Read:%s" % Aluno
        perm.append(user)
        tree.write("/var/lib/jenkins/config.xml")
        log.info("[+] User %s successfully added to Jenkins", Aluno)
    except Exception as e:
        log.error("[-] Error adding user to Jenkins %s", e)
def read_settings_file(self):
    # read custom settings file if it exists
    xml_data = None
    try:
        xml_data = parse(self.settings_file_path).getroot()
    except:
        debug_print("Couldn't find or read custom settings file")
    if xml_data is not None:
        try:
            self.password = xml_data.find('password').text
        except:
            pass
        try:
            self.port = int(xml_data.find('port').text)
        except:
            pass
        try:
            self.wakerurl = xml_data.find('wakerurl').text
        except:
            pass
        # the original source was scrubbed here ("******"); it presumably
        # printed the password and port for debugging
        debug_print("The password = ", self.password)
        debug_print("Running on port: ", self.port)
def _handleXML(data):
    # How should data == '' be handled ???
    if data == '':
        data = '<e> </e>'
    l = None
    if DEBUG:
        print data
    x = parse(StringIO.StringIO(data))
    mode = x.getroot().tag
    if x.getroot().attrib.has_key("user"):
        user = x.getroot().attrib["user"]
    else:
        user = ""
    if mode == 'tags':
        l = [dict(count=t.attrib["count"], tag=t.attrib["tag"])
             for t in x.findall("tag")]
    elif mode == "result":
        if ((x.getroot().attrib.has_key("code") and
             x.getroot().attrib["code"] == 'done') or
                x.getroot().text in ['done', 'ok']):
            l = True
        else:
            l = False
    elif mode == 'update':
        l = x.getroot().attrib['time']
    elif mode == 'dates':
        l = [dict(count=t.attrib["count"], date=t.attrib["date"])
             for t in x.findall("date")]
    elif mode == 'bundles':
        l = [dict(name=t.attrib["name"], tags=t.attrib["tags"])
             for t in x.findall("bundle")]
    elif mode == 'posts':
        l = posts()
        for t in x.findall("post"):
            href, description, hash = '', '', ''
            tag, time, extended = '', '', ''
            count = ''
            if t.attrib.has_key("href"):
                href = t.attrib["href"]
            if t.attrib.has_key("description"):
                description = t.attrib["description"]
            if t.attrib.has_key("hash"):
                hash = t.attrib["hash"]
            if t.attrib.has_key("tag"):
                tag = t.attrib["tag"]
            if t.attrib.has_key("time"):
                time = t.attrib["time"]
            if t.attrib.has_key("extended"):
                extended = t.attrib["extended"]
            if t.attrib.has_key("count"):
                count = t.attrib["count"]
            p = post(href=href, description=description, hash=hash, tag=tag,
                     time=time, extended=extended, count=count, user=user)
            l.append(p)
    return l
def _ordervdex():
    if len(sys.argv) > 1:
        vdex_filename = sys.argv[1]
    else:
        vdex_filename = "out.xml"
    vdex = parse(vdex_filename)
    orderNode(vdex.getroot())
    vdex.write(vdex_filename, "utf-8")
def __init__(self, input):
    self.filename = os.path.split(input)[-1]
    self.xmltree = parse(input).getroot()
    self.TODAY = date.today().isoformat()
    self.tag_names = ['ID', 'creator', 'created_date', 'modified_date',
                      'weight', 'aplha', 'BQP', 'BQP_buffer', 'cell_removal',
                      'spp_file']
def _xmltoxls():
    if len(sys.argv) > 1:
        xml = parse(sys.argv[1])
    else:
        xml = parse('out.xml')
    out = csv.DictWriter(file('out.csv', 'w'),
                         ['id', 'levela', 'levelb', 'levelc', 'leveld',
                          'levele', 'levelf', 'levelg', 'levelh', 'translations'])
    for term in xml.findall('.//%sterm' % NS):
        id = term.find('%stermIdentifier' % NS).text
        level = 'level' + id[-1].lower()
        caption = [x.text for x in term.findall('%scaption/%slangstring' % (NS, NS))
                   if x.attrib['language'] == 'en'][0].encode('ascii', 'ignore')
        translations = len(set(x.text for x in
                               term.findall('%scaption/%slangstring' % (NS, NS))))
        new_row = {}
        new_row['id'] = id
        new_row[level] = caption
        new_row['translations'] = translations
        print id, new_row.keys()
        out.writerow(new_row)
def getXml(url):
    try:
        myxml = parse(urllib.urlopen(url)).getroot()
        return myxml
    except Exception, e:
        print 'could not retrieve feed: ' + str(e)
        return 0
def readTagAndSince(filename, headertag='EcalCondHeader'):
    '''Read tag and since from EcalCondHeader in an XML file.'''
    root = parse(filename).getroot()
    header = root.find(headertag)
    since = header.findtext('since')
    tag = header.findtext('tag')
    return tag, since
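# USAGE (a sketch, not from the original source; 'header.xml' is a hypothetical
# file containing an EcalCondHeader element):
# tag, since = readTagAndSince('header.xml')
# print 'tag:', tag, 'since:', since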
def parse_xml(file):
    file_out = []
    dates = []
    actions = []
    dates_trimmed = []
    pos = -1
    print file
    tree = parse(file)
    doc = tree.getroot()
    rin = doc.find('RIN_INFO/RIN')
    agency = doc.find('RIN_INFO/PARENT_AGENCY/ACRONYM')
    sub_agency = doc.find('RIN_INFO/AGENCY/ACRONYM')
    priority = doc.find('RIN_INFO/PRIORITY_CATEGORY')
    for action in doc.findall('RIN_INFO/TIMETABLE_LIST/TIMETABLE/TTBL_ACTION'):
        actions.append(action.text.rstrip())
    for date in doc.findall('RIN_INFO/TIMETABLE_LIST/TIMETABLE/TTBL_DATE'):
        dates.append(date.text.rstrip())
    for position, item in enumerate(actions):
        # note: the original joined the two exclusions with 'or', which is
        # always true; 'and' matches the intent of skipping "effective" entries
        if ((item.lower()[0:12] == 'final action' or
             item.lower()[0:10] == 'final rule') and
                (item.lower()[0:20] != 'final rule effective' and
                 item.lower()[0:22] != 'final action effective')):
            pos = position
    if pos == -1:
        file_out.append('0')
    else:
        file_out.append('1')
    file_out.append(str(len(dates)))
    try:
        dates_trimmed = [dates[0], dates[pos]]
    except:
        dates_trimmed = ['.', '.']
    for field in (rin, agency, sub_agency, priority):
        try:
            file_out.append(field.text.rstrip())
        except:
            file_out.append('.')
    file_out.append(dates_trimmed[0])
    file_out.append(dates_trimmed[1])
    return file_out
def getFont(self):
    '''Parse the current skin's Font.xml file for a list of font names by size'''
    log('Loading font set from current skin: ' + xbmc.getSkinDir())
    # Use letterHeight (reasoning: WIDTH may be elastic in the future)
    desiredSize = self.letterHeight * 1 / 2  # Decent ratio
    fallback = 'font'
    # Font.xml can be in any resolution folder; keep trying until we find one.
    # Use the first Font.xml we come across.
    skinDir = xbmc.translatePath("special://skin/")
    for item in os.listdir(skinDir):
        fontFile = os.path.join(skinDir, item, 'Font.xml')
        if not os.path.exists(fontFile):
            continue
        try:
            root = parse(fontFile).getroot()
        except:
            continue
        for set in root.findall('fontset'):
            # Now that we've found the file, use the Default fontset
            # (guaranteed to exist regardless of skin)
            if 'id' not in set.attrib or set.attrib['id'] != 'Default':
                continue
            # Add one so we don't lie when saying "smaller"
            # (versus "smaller than or equal to")
            log('Font set loaded. Searching for a font smaller than %dpt'
                % (desiredSize + 1))
            # Index the discovered fonts into two categories
            fontsWithoutStyle = {}
            fontsWithStyle = {}
            for font in set.findall('font'):
                if font.find('size') is None or font.find('name') is None:
                    continue
                size = int(font.find('size').text)
                # Skip fonts larger than the desired size
                if size > desiredSize:
                    continue
                # was "if not font.find('style')", which is unreliable because
                # childless elements are falsy; test for absence explicitly
                if font.find('style') is None:
                    fontsWithoutStyle[size] = font.find('name').text
                else:
                    fontsWithStyle[size] = font.find('name').text
            # Categories generated. Prefer unstyled fonts
            if len(fontsWithoutStyle):
                max = sorted(fontsWithoutStyle.keys(), reverse=True)[0]
                log('Using unstyled font "%s" (%dpt)' % (fontsWithoutStyle[max], max))
                return fontsWithoutStyle[max]
            elif len(fontsWithStyle):
                max = sorted(fontsWithStyle.keys(), reverse=True)[0]
                log('Using styled font "%s" (%dpt)' % (fontsWithStyle[max], max))
                return fontsWithStyle[max]
            log('No suitable fonts found. Falling back to ' + fallback)
            return fallback
        log('Default font set not found. Falling back to ' + fallback)
        return fallback
    log('Font.xml not found. Falling back to ' + fallback)
    return fallback
def __init__(self):
    self.feed = []
    url = "http://twitter.com/statuses/public_timeline.xml"
    self.xml = parse(urllib2.urlopen(url))
    map(self._parseNode, self.xml.getiterator('status'))
    kml = googlegeo.createKML(self.feed)
    kmlFile = codecs.open('feed.xml', 'w', "utf-8")
    kmlFile.write(unicode(kml))
    kmlFile.close()
def loadVariableList(project):
    tree = parse(CONFIG_PATH + project + CONFIG_EXTENSION)
    variableRoot = tree.getroot()[0]
    variableList = [[0 for x in xrange(2)] for x in xrange(len(variableRoot))]
    rowNumber = 0
    for variable in variableRoot:
        variableList[rowNumber][0] = variable.attrib[XML_DATATYPE]
        variableList[rowNumber][1] = variable.attrib[XML_URL]
        rowNumber += 1
    return variableList
def ranges_generator(filexml):
    '''Process an XML file and yield the ranges it contains.'''
    trace('Processing ' + filexml)
    tree = parse(filexml)
    allranges = tree.findall('range')
    for xmlrange in allranges:
        trace('processing xml row')
        yield RangePos(xmlrange)
def loadMappingList(project):
    tree = parse(CONFIG_PATH + project + CONFIG_EXTENSION)
    mappingRoot = tree.getroot()[0]
    mappingList = [0 for x in xrange(len(mappingRoot))]
    rowNumber = 0
    for variable in mappingRoot:
        mappingList[rowNumber] = variable.attrib["name"]
        rowNumber += 1
    return mappingList
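# USAGE (a sketch, not from the original source; 'myproject' is a hypothetical
# project name resolved against CONFIG_PATH and CONFIG_EXTENSION):
# variableList = loadVariableList('myproject')  # rows of [datatype, url]
# mappingList = loadMappingList('myproject')    # parallel list of names
# for (datatype, url), name in zip(variableList, mappingList):
#     print name, datatype, url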
def get_latest_song():
    unpicklefile = open(PICKLE_FILE, "r")
    unpickleditems = pickle.load(unpicklefile)
    unpicklefile.close()
    try:
        myxml = parse(urllib.urlopen(feed_url)).getroot()
    except Exception, e:
        print "could not retrieve feed: %s" % str(e)
        # myitems is presumably a module-level dict defined elsewhere
        myitems["Title"] = 0
        return 0
def readScriptConfig():
    """
    Read the config from the config directory; when it does not already
    exist, init the default path and test the F drive.
    """
    global LOG_VALUE
    config = None
    ### TODO: add an "update from internet" button ... copy..
    if os.path.exists(CONFIG_FULL_PATH):
        # parse config
        file = open(CONFIG_FULL_PATH, "r")
        tree = parse(file)
        elemConfig = tree.getroot()  # 'config'
        logvalue = elemConfig.findtext('logvalue')
        intLogValue = 0
        if logvalue is not None:
            intLogValue = int(logvalue)  # original read int(intLogValue), which was always 0
        # Set the global log value
        LOG_VALUE = intLogValue
        mediaNode = elemConfig.find('media')
        mediaUpdateURL = mediaNode.get('updateurl')
        podcastNode = elemConfig.find('podcast')
        podcastDownload = podcastNode.get('download')
        ispodcastDownload = None
        if podcastDownload is not None:
            ispodcastDownload = podcastDownload.lower() == 'true'  # 0 or 1
        podcastDownloadPath = podcastNode.get('localpath')
        # init the config
        config = UserConfig(podcastDownloadPath, ispodcastDownload,
                            mediaUpdateURL, intLogValue)
    else:
        # create the default config
        outPrint('Unable to find the config, create a default')
        config = UserConfig(None, None, None, None)
        # save it
        saveScriptConfig(config)
    # Test the media location path, use alternative otherwise
    if not os.path.exists(config.podcastDownloadPath):
        retc = createDirectory(config.podcastDownloadPath, False)
        if retc == False:
            dialog2 = xbmcgui.Dialog()
            dialog2.ok('Warning', 'Cannot Access to:', config.podcastDownloadPath,
                       'Will not be able to download podcasts.')
    return config
def process_xml_file(filexml, g):
    '''Process an XML file and store its info in the ranges contained in g.'''
    trace('Processing ' + filexml)
    tree = parse(filexml)
    allranges = tree.findall('range')
    for xmlrange in allranges:
        trace('processing xml row')
        r = RangePos(xmlrange)
        g.add_range(r)
def exposed_getRSS(self, url):
    message = ''
    # get data from XML
    rss = parse(urllib.urlopen(url)).getroot()
    for data in rss.findall('gempa'):
        magnitude = data.find('Magnitude').text
        jumlah = data.find('Jumlah').text
        message = message + 'Magnitude\t: ' + magnitude + ' Jumlah\t: ' + jumlah + '\n'
    print '\n---------------'
    return message
def fix_net_file(net_file, r, edges, n_lanes, lane_width):
    xml = parse(net_file)
    lanes = xml.findall("edge/lane")
    for l in lanes:
        if l.get("id")[:4] in ["edg1", "edg2"]:
            edge, lane = l.get("id").split("_")
            lane = int(lane)
            length = get_half_circle_lane_length(r, edges, n_lanes, lane, lane_width)
            l.set("length", str(length))
    with open(net_file, "w") as out_file:
        xml.write(out_file)
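# USAGE (a sketch, not from the original source; the file name and parameter
# values are made up for illustration):
# fix_net_file('ring.net.xml', r=50.0, edges=4, n_lanes=2, lane_width=3.2)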
def __init__(self, filename_):
    file = open(filename_, 'r')
    tree = parse(file)
    root = tree.getroot()
    if root.tag == 'Documents':
        for document in root.getchildren():
            doc = XMLDocument(document)
            doc.pathfile = filename_
            self.append(doc)
    elif root.tag == 'Document':
        doc = XMLDocument(root)
        doc.pathfile = filename_
        self.append(doc)
def get_hea_payload(stop=306):
    params = urllib.urlencode({
        't': int(math.ceil(time.time())),
        's': stop,
    })
    url = "http://hea.thebus.org/getBus.asp?%s" % (params)
    tree = parse(urllib.urlopen(url))
    description = tree.find('description')
    soup = BeautifulSoup(description.text)
    return soup.contents[0].contents
def __init__(self, filename):
    Keyboard.__init__(self, name=filename)
    self.name = filename
    try:
        tree = parse(filename)
        root = tree.getroot()
        keys = root.find('key-list')
        self.brand = root.find('./keyboard-info/keyboard-name/manufacturer').text
        self.model = root.find('./keyboard-info/keyboard-name/model').text
        # XXX extract more info
    except xml.parsers.expat.ExpatError, e:
        self.valid = False
        print e
        print self.filename, " ",
def getRSS(self, url):
    informasi = []
    message = ''
    rss = parse(urllib.urlopen(url)).getroot()
    for data in rss.findall('gempa'):
        magnitude = data.find('Magnitude').text
        jumlah = data.find('Jumlah').text
        informasi.append([magnitude, jumlah])
        message = message + 'Magnitude\t: ' + magnitude + '\n' + 'Jumlah\t\t: ' + jumlah + '\n'
    print '\n================================'
    return message
def read(fpath):
    from elementtree.ElementTree import parse
    root = parse(fpath).getroot()
    for patt in root:
        pattern = Pattern()
        pattern.name = patt.get('name')
        pattern.beats = int(patt.get('beats'))
        for note in patt:
            start = int(note.get('start'))
            stop = int(note.get('stop'))
            pitch = int(note.get('pitch'))
            pattern.add(Note(start, stop, pitch))
        if pattern.name == 'None':
            pattern.name = None
        return pattern  # only the first pattern in the file is returned
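# USAGE (a sketch, not from the original source; 'pattern.xml' is hypothetical):
# pattern = read('pattern.xml')
# print pattern.name, pattern.beats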
def RemoverUsuario(self, Aluno):
    try:
        tree = parse("/var/lib/jenkins/config.xml")
        elem = tree.getroot()
        perm = elem.find('authorizationStrategy')
        busca = perm.findall('permission')
        for b in busca:
            if b.text == "hudson.model.Hudson.Read:%s" % Aluno:
                perm.remove(b)
                log.info("[+] User %s removed", Aluno)
                break
        else:
            log.warning("[!] User not found in Jenkins")
        tree.write("/var/lib/jenkins/config.xml")
    except Exception as e:
        log.error("[-] Error removing the user %s", e)
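# USAGE (a sketch, not from the original source; 'manager' is a hypothetical
# instance of the class holding the Jenkins helpers above):
# manager.AdicionarUsuario('student@example.com')    # grant read-only access
# manager.RemoverUsuario('student@example.com')      # revoke it again
# manager.RemoverCredential('student@example.com')   # drop the SSH credential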
def get_bigtxt(path, topics):
    f = open("big.txt", 'a')
    for topic in topics:
        targetpath = path + "xml/" + topic + ".xml"
        targetXML = open(targetpath, 'r')
        tree = parse(targetXML)
        root = tree.getroot()
        for neighbor in root.getiterator('photo'):
            desc = neighbor.attrib.get('description')
            f.write(desc.encode('utf8') + "\n")
    f.close()
def update_calendar(self, timeline=None, marker_name=None, frame_num=None):
    xmltree = parse(self.xmlresponse)
    min_delta = timedelta.max
    for entry in xmltree.getroot().findall('{http://www.w3.org/2005/Atom}entry'):
        start_time = entry.find('{http://schemas.google.com/g/2005}when').attrib['startTime']
        if len(start_time) == 10:
            # all-day events carry a date only (YYYY-MM-DD); skip them
            continue
        # strip the trailing timezone offset before parsing
        event_time = datetime.strptime(start_time[:-6], '%Y-%m-%dT%H:%M:%S.%f')
        event_delta = event_time - datetime.now()
        print entry.find('{http://www.w3.org/2005/Atom}title').text + ' ' + str(event_time)
        if (event_delta > timedelta(hours=0)) and (event_delta < min_delta):
            self.soonest_event = entry
            self.soonest_event_time = event_time
            self.soonest_event_name = entry.find('{http://www.w3.org/2005/Atom}title').text
            min_delta = event_delta
    self.eventtitletext.set_text(self.soonest_event_name)
    self.eventtimetext.set_text(str(min_delta)[:-10])
def read_topic(path):
    topics = []
    topicpath = glob.glob(path + "*_topics.xml")
    topicXML = open(topicpath[0], 'r')
    tree = parse(topicXML)
    root = tree.getroot()
    for element in root.findall('topic'):
        topics.append(element.findtext('title'))
    return topics
def xml_init(self, f):
    """Initialize this instance from a Solr schema.xml"""
    tree = parse(f)
    e = tree.find('uniqueKey')
    if e is not None:
        self.uniqueKey = e.text.strip()
    e = tree.find('defaultSearchField')
    if e is not None:
        self.defaultSearchField = e.text.strip()
    types = {}
    for e in tree.findall('types/fieldType'):
        types[e.attrib['name']] = e
    for e in tree.findall('fields/field'):
        t = types[e.attrib['type']]
        self.fields.append(SolrField(e, t))
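# USAGE (a sketch, not from the original source; 'SolrSchema' is a hypothetical
# name for the class owning xml_init, and 'schema.xml' a local Solr schema):
# schema = SolrSchema()
# schema.xml_init(open('schema.xml'))
# print schema.uniqueKey, schema.defaultSearchField, len(schema.fields)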
def readMediaElementTree(filePath, encoding=ENCODING_IN):
    """
    Open the media file and return the list of all channels with programs,
    and the version.
    """
    file = open(filePath, "r")
    tree = parse(file)
    elemMedia = tree.getroot()
    channelsList = []
    catList = []
    achannel = None
    conf_version = elemMedia.get('version')
    conf_author = elemMedia.get('author')
    conf_date = elemMedia.get('date')
    # for error printing
    channame = None
    progname = None
    ####################
    # read main category
    for elemCategory in elemMedia.getiterator('category'):
        try:
            name = elemCategory.text
            id = elemCategory.get('id')
            imagelink = elemCategory.get('imagelink')
            parentid = elemCategory.get('parentid')
            acat = Category(name, id, parentid, imagelink)
            catList.append(acat)
        except Exception, ex:
            nn = name
            if nn == None:
                nn = ''
            outPrint('problem when reading category ' + nn, ex)
        elemCategory.clear()
def get_tags_query(path, topic):
    vec = []
    nodes = []
    targetpath = path + "xml/" + topic + ".xml"
    targetXML = open(targetpath, 'r')
    tree = parse(targetXML)
    root = tree.getroot()
    for neighbor in root.getiterator('photo'):
        photoid = neighbor.attrib.get('id')  # add node keyed by photo id
        tags = neighbor.attrib.get('tags')
        tagvec = tags.split()
        vec.append(tagvec)
        nodes.append(photoid)
    tagdict = dict(zip(nodes, vec))
    return tagdict, nodes
def getPodcastFileInfo(mediaFilePath):
    """Get the podcast title and description as UTF strings."""
    textpath = mediaFilePath + '.xml'
    file = open(textpath, "r")
    tree = parse(file)
    elemMedia = tree.getroot()  # 'media'
    conf_version = elemMedia.get('version')
    title = elemMedia.findtext('title')
    description = elemMedia.findtext('description', '')
    elemMedia.clear()
    file.close()
    return title, description
def get_hea_payload(self, stop):
    params = urllib.urlencode({
        't': int(math.ceil(time.time())),
        's': stop,
    })
    url = "http://hea.thebus.org/getBus.asp?%s" % (params)
    self.payload = urllib.urlopen(url)
    tree = parse(self.payload)
    description = tree.find('description')
    soup = BeautifulSoup(description.text)
    return soup.contents[0].contents

# USAGE
# items = get_hea_payload(393)
#
# for item in items:
#     i = TimeItem(item)
#     i.display()