def getAllColors():
    """Collect (name, colorstring) tuples from the helper addon's
    colors.xml and the active skin's colors/defaults.xml.

    Returns a list of (name, colorstring) tuples, both lowercased,
    for the caller to sort later on.
    """
    def _read_colors(xml_path):
        # Best-effort reader: returns [] when the file does not exist.
        colors = []
        if xbmcvfs.exists(xml_path):
            doc = parse(xml_path)
            for color in doc.documentElement.getElementsByTagName("color"):
                name = color.attributes["name"].nodeValue.lower()
                colorstring = color.childNodes[0].nodeValue.lower()
                colors.append((name, colorstring))
        return colors

    allColors = []
    # colors shipped with the skin helper addon
    allColors += _read_colors(xbmc.translatePath(
        "special://home/addons/script.skin.helper.service/resources/colors/colors.xml").decode("utf-8"))
    # colors provided by the current skin
    allColors += _read_colors(xbmc.translatePath(
        "special://skin/colors/defaults.xml").decode("utf-8"))
    return allColors
def getmodelvendor(type, ipaddress):
    """Query a device for model/vendor details.

    type      -- device family: "thermostat", "Philips" or "WeMo"
                 (parameter name kept for caller compatibility even
                 though it shadows the builtin)
    ipaddress -- URL of the device's status/description document

    Returns a dict with 'model' and 'vendor' keys ('nickname' too for
    WeMo), or None for an unknown type or a non-200 thermostat reply.
    Response handles are now closed on every path (they previously
    leaked when parsing raised or the status was not 200).
    """
    if type == "thermostat":
        modeladdress = ipaddress.replace('/sys', '/tstat/model')
        deviceModelUrl = urllib2.urlopen(modeladdress)
        try:
            if deviceModelUrl.getcode() == 200:
                deviceModel = parseJSONresponse(deviceModelUrl.read().decode("utf-8"), "model")
                deviceVendor = "RadioThermostat"
                return {'model': deviceModel, 'vendor': deviceVendor}
        finally:
            deviceModelUrl.close()
    elif type == "Philips":
        deviceUrl = urllib2.urlopen(ipaddress)
        try:
            dom = minidom.parse(deviceUrl)
            deviceModel = dom.getElementsByTagName('modelName')[0].firstChild.data
            deviceVendor = dom.getElementsByTagName('manufacturer')[0].firstChild.data
            return {'model': deviceModel, 'vendor': deviceVendor}
        finally:
            deviceUrl.close()
    elif type == "WeMo":
        deviceUrl = urllib2.urlopen(ipaddress)
        try:
            dom = minidom.parse(deviceUrl)
            deviceModel = dom.getElementsByTagName('modelName')[0].firstChild.data
            deviceVendor = dom.getElementsByTagName('manufacturer')[0].firstChild.data
            nickname = dom.getElementsByTagName('friendlyName')[0].firstChild.data
            if str(deviceModel).lower() == 'socket':
                # Only Belkin 'controllee' sockets keep the reported model;
                # every other socket variant is reported as Unknown.
                deviceType = dom.getElementsByTagName('deviceType')[0].firstChild.data
                deviceType = re.search('urn:Belkin:device:([A-Za-z]*):1', deviceType).groups()[0]
                if deviceType.lower() != 'controllee':
                    deviceModel = 'Unknown'
            return {'model': deviceModel, 'vendor': deviceVendor, 'nickname': nickname}
        finally:
            deviceUrl.close()
def query(self, token, langs=None):
    ''' makes a query and returns info (link, lang) about found subtitles'''
    # Only TV-show tokens are supported; this provider serves en/nl only.
    guessedData = self.guessFileData(token)
    if "tvshow" != guessedData['type'] :
        return []
    elif langs and not set(langs).intersection((['en', 'nl'])):
        # lang is given but does not include nl or en
        return []
    if not langs :
        availableLangs = ['nl', 'en']
    else :
        availableLangs = list(set(langs).intersection((['en', 'nl'])))
    log.debug("possible langs : %s " % availableLangs)
    sublinks = []
    # Query the show to get the show id.
    # Resolution order: hard-coded exceptions table, then the on-disk
    # pickle cache, then a live GetShowByName API call (whose result is
    # written back to the cache file).
    showName = guessedData['name'].lower()
    if exceptions.has_key(showName):
        show_id = exceptions.get(showName)
    elif self.cache['showids'].has_key(showName):
        show_id = self.cache['showids'].get(showName)
    else :
        getShowId_url = "%sGetShowByName/%s" %(self.api, urllib.quote(showName))
        log.debug("Looking for show Id @ %s" % getShowId_url)
        page = urllib2.urlopen(getShowId_url)
        dom = minidom.parse(page)
        if not dom or len(dom.getElementsByTagName('showid')) == 0 :
            # Unknown show: nothing to search for.
            page.close()
            return []
        show_id = dom.getElementsByTagName('showid')[0].firstChild.data
        self.cache['showids'][showName] = show_id
        # Persist the updated id cache for future queries.
        f = open(self.cache_path, 'w')
        pickle.dump(self.cache, f)
        f.close()
        page.close()
    # Query the episode to get the subs
    for lang in availableLangs :
        getAllSubs_url = "%sGetAllSubsFor/%s/%s/%s/%s" %(self.api, show_id, guessedData['season'], guessedData['episode'], lang)
        log.debug("Looking for subs @ %s" %getAllSubs_url)
        page = urllib2.urlopen(getAllSubs_url)
        dom = minidom.parse(page)
        page.close()
        for sub in dom.getElementsByTagName('result'):
            release = sub.getElementsByTagName('filename')[0].firstChild.data
            # Strip a trailing .srt so the release name compares cleanly
            # against the token.
            if release.endswith(".srt"):
                release = release[:-4]
            dllink = sub.getElementsByTagName('downloadlink')[0].firstChild.data
            log.debug("Release found : %s" % release.lower())
            log.debug("Searching for : %s" % token.lower())
            # Only exact (case-insensitive) release matches are returned.
            if release.lower() == token.lower():
                result = {}
                result["release"] = release
                result["link"] = dllink
                result["page"] = dllink
                result["lang"] = lang
                sublinks.append(result)
    return sublinks
def test_output_compatible_setup_nooutput(self):
    """A --silent run writing xunit + json result files must keep
    stdout/stderr empty while both files remain valid and parsable."""
    tmpfile = tempfile.mktemp()
    tmpfile2 = tempfile.mktemp()
    os.chdir(basedir)
    # Verify --silent can be supplied as app argument
    cmd_line = ('./scripts/avocado --silent run --job-results-dir %s '
                '--sysinfo=off --xunit %s --json %s passtest.py'
                % (self.tmpdir, tmpfile, tmpfile2))
    result = process.run(cmd_line, ignore_status=True)
    output = result.stdout + result.stderr
    expected_rc = exit_codes.AVOCADO_ALL_OK
    try:
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s"
                         % (expected_rc, result))
        self.assertEqual(output, "", "Output is not empty:\n%s" % output)
        # Check if we are producing valid outputs
        with open(tmpfile2, 'r') as fp:
            json_results = json.load(fp)
            debug_log = json_results['debuglog']
            self.check_output_files(debug_log)
        # xunit file must at least be well-formed XML.
        minidom.parse(tmpfile)
    finally:
        # Best-effort cleanup of the temp result files.
        try:
            os.remove(tmpfile)
            os.remove(tmpfile2)
        except OSError:
            pass
def check_balloon(logger, dom_name, dom_active, dom_eles):
    """ check balloon of given domain """
    # Active domains expose their live XML under /run, inactive ones
    # only have the persistent definition under /etc.
    inactive_xml = "/etc/libvirt/qemu/" + dom_name + ".xml"
    active_xml = "/run/libvirt/qemu/" + dom_name + ".xml"
    if not dom_active:
        # Only the configured maximum can be checked for an inactive domain.
        tree = minidom.parse(inactive_xml)
        maximum = int(tree.getElementsByTagName('memory')[0].
                      childNodes[0].data)
        logger.debug("Checking balloon.maximum: %d"
                     % dom_eles.get("balloon.maximum"))
        if not compare_value(logger, maximum,
                             dom_eles.get("balloon.maximum")):
            return False
        return True
    tree = minidom.parse(active_xml)
    domain_node = tree.getElementsByTagName('domain')[0]
    maximum = int(domain_node.getElementsByTagName('memory')[0].
                  childNodes[0].data)
    current = int(domain_node.getElementsByTagName('currentMemory')[0].
                  childNodes[0].data)
    logger.debug("Checking balloon.maximum: %d"
                 % dom_eles.get("balloon.maximum"))
    if not compare_value(logger, maximum, dom_eles.get("balloon.maximum")):
        return False
    logger.debug("Checking balloon.current: %d"
                 % dom_eles.get("balloon.current"))
    if not compare_value(logger, current, dom_eles.get("balloon.current")):
        return False
    return True
def _TestOutFile(self, test_name, expected_xml):
    """Run *test_name* with --gtest_output and check that the XML file it
    produces is equivalent to *expected_xml* after normalization."""
    prog = gtest_test_utils.GetTestExecutablePath(test_name)
    command = [prog, "--gtest_output=xml:%s" % self.output_dir_]
    p = gtest_test_utils.Subprocess(command,
                                    working_dir=gtest_test_utils.GetTempDir())
    self.assert_(p.exited)
    self.assertEquals(0, p.exit_code)
    # libtool may build the binary as lt-gtest_xml_outfiles_test_ instead
    # of gtest_xml_outfiles_test_, which changes the XML file name as
    # well, so accept either variant.  (Remove once libtool's replacement
    # tool is in use.)
    plain_name = test_name + ".xml"
    primary = os.path.join(self.output_dir_, plain_name)
    fallback = os.path.join(self.output_dir_, 'lt-' + plain_name)
    self.assert_(os.path.isfile(primary) or os.path.isfile(fallback),
                 primary)
    expected = minidom.parseString(expected_xml)
    if os.path.isfile(primary):
        actual = minidom.parse(primary)
    else:
        actual = minidom.parse(fallback)
    self.NormalizeXml(actual.documentElement)
    self.AssertEquivalentNodes(expected.documentElement,
                               actual.documentElement)
    expected.unlink()
    actual.unlink()
def writeallrevinfo(playername, datestart, dateend): filename2 = 'dumps/revdata_long/' + playername+"_"+str(datestart)[:-2]+'.xml' count = 0 cont_flag = 1 uniqueid = str(datestart)[:-2] urlstring = "rvstart="+str(dateend)[:-2] while cont_flag>0: if os.path.exists(filename2): pass else: query = "http://en.wikipedia.org/w/api.php?action=query&prop=revisions&rvlimit=500&format=xml&rvprop=timestamp%7Cuser%7Csize%7Cflags&"+urlstring+"&rvend="+str(datestart)[:-2]+"&rvcontinue&titles="+playername print "\n getting " + playername + "\n------------------------\n" cur_page = urllib2.urlopen(query) revdata = open(filename2,'w+') revdata.write(cur_page.read()) revdata.close() revdata = open(filename2,'r') doc = minidom.parse(revdata) rvcont = doc.getElementsByTagName("query-continue") if len(rvcont)>0: revisions = rvcont[0].getElementsByTagName("revisions") rvstartid = revisions[0].getAttribute("rvstartid") urlstring = "rvstartid=" + str(rvstartid) uniqueid = rvstartid cont_flag = 1 filename2 = 'dumps/revdata_long/' + playername+"_"+str(uniqueid)+'.xml' else: cont_flag = 0 cont_flag = 1 count = 0 uniqueid = str(datestart)[:-2] while cont_flag>0: filename2 = 'dumps/revdata_long/' + playername+"_"+str(uniqueid)+'.xml' revdata = open(filename2,'r') doc = minidom.parse(revdata) count = count + len(doc.getElementsByTagName("rev")) rvcont = doc.getElementsByTagName("query-continue") if len(rvcont)>0: revisions = rvcont[0].getElementsByTagName("revisions") rvstartid = revisions[0].getAttribute("rvstartid") urlstring = "rvstartid=" + str(rvstartid) uniqueid = rvstartid cont_flag = 1 else: cont_flag = 0 return count
def CheckDate(self, context):
    """Verify the exported file preserves the animation asset's <created>
    timestamp exactly.

    Returns True on pass, False on a timestamp mismatch, and None when a
    prerequisite could not be read (missing date or no export step).
    Only change from the original: identity comparison with None
    (`is None`) instead of `== None`.
    """
    # Get the <created> time for the input file
    root = minidom.parse(context.GetInputFilename()).documentElement
    inputCreatedDate = ParseDate(GetXmlContent(FindXmlChild(root, "library_animations", "animation", "asset", "created")))
    if inputCreatedDate is None:
        context.Log("FAILED: Couldn't read <created> value from test input file.")
        return None
    # Get the output file
    outputFilenames = context.GetStepOutputFilenames("Export")
    if len(outputFilenames) == 0:
        context.Log("FAILED: There are no export steps.")
        return None
    # Get the <created> time for the output file
    root = minidom.parse(outputFilenames[0]).documentElement
    outputCreatedDate = ParseDate(GetXmlContent(FindXmlChild(root, "library_animations", "animation", "asset", "created")))
    if outputCreatedDate is None:
        context.Log("FAILED: Couldn't read <created> value from the exported file.")
        return None
    # The exported timestamp must match the input exactly.
    if (outputCreatedDate - inputCreatedDate) != timedelta(0):
        context.Log("FAILED: <created> is not preserved.")
        context.Log("The original <created> time is " + str(inputCreatedDate))
        context.Log("The exported <created> time is " + str(outputCreatedDate))
        return False
    context.Log("PASSED: <created> element is preserved.")
    return True
def get_xml_data(self):
    """Parse this building's XML file and append a Card for each <card>
    element, dispatching on the card's 'type' attribute.

    Falls back to 'Wheaton College.xml' when the named file cannot be
    opened or parsed.
    """
    try:
        xmlDoc = minidom.parse(self.name.replace("'", "") + ".xml")
    except Exception:
        # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
        # are no longer swallowed; any IO/parse failure still falls back.
        xmlDoc = minidom.parse("Wheaton College.xml")
    building = xmlDoc.getElementsByTagName('building')
    for card in building[0].getElementsByTagName('card'):
        type_card = Card()
        kind = card.attributes['type'].value  # read once per card
        if kind == "spring_fling":
            type_card.set_kind("spring_fling")
            type_card.add_image(card.getElementsByTagName("image")[0].attributes['url'].value)
            type_card.add_fact(card.getElementsByTagName("text")[0].childNodes[0].nodeValue)
        elif kind == "secret_agent":
            # Secret-agent cards may carry several <text> facts.
            type_card.set_kind("secret_agent")
            type_card.add_image(card.getElementsByTagName("image")[0].attributes['url'].value)
            for text in card.getElementsByTagName("text"):
                type_card.add_fact(text.childNodes[0].nodeValue)
        elif kind == "paragraph":
            type_card.set_kind("paragraph")
            type_card.add_fact(card.getElementsByTagName("text")[0].childNodes[0].nodeValue)
        elif kind == "modified_abe":
            type_card.set_kind("modified_abe")
            type_card.add_image(card.getElementsByTagName("image")[0].attributes['url'].value)
            type_card.add_fact(card.getElementsByTagName("text")[0].childNodes[0].nodeValue)
        else:
            # Unrecognized card type: keep a placeholder so counts match.
            type_card.set_kind("err")
        self.add_card(type_card)
def _load(self, filename):
    """Open the .docx archive *filename*: extract it to self.tmpdir and
    parse the document body, relationships, styles and the optional
    custom-properties/settings/header/numbering parts."""
    zip = zipfile.ZipFile(filename, 'r')  # NOTE: shadows the builtin 'zip'
    zip.extractall(self.tmpdir)
    wdoc = minidom.parse(zip.open('word/document.xml')).documentElement
    # Declare the wx/a namespace prefixes on the root element --
    # presumably needed for later serialization; confirm against callers.
    wdoc.setAttribute('xmlns:wx', ns['wx'])
    wdoc.setAttribute('xmlns:a', ns['a'])
    self.body = wdoc.getElementsByTagName('w:body')[0]
    # Relationship id -> (target, type) mapping.
    relfile = zip.open('word/_rels/document.xml.rels')
    for n in minidom.parse(relfile).getElementsByTagName('Relationship'):
        self.media[n.getAttribute('Id')] = ( n.getAttribute('Target'), n.getAttribute('Type') )
    # Style display name -> internal style id mapping.
    sdoc = minidom.parse(zip.open('word/styles.xml'))
    for s in sdoc.getElementsByTagName('w:style'):
        style_id = s.getAttribute('w:styleId')
        n = s.getElementsByTagName('w:name')[0]
        style_name = n.getAttribute('w:val')
        self.styles[style_name] = style_id
    # The remaining parts are optional in a .docx; absence (or any parse
    # failure) deliberately leaves the attribute as None.
    try:
        self.property = CustomProperty(self, zip.open('docProps/custom.xml'))
    except:
        self.property = None
    try:
        self.settings = Settings(zip.open('word/settings.xml'))
    except:
        self.settings = None
    try:
        self.header = minidom.parse(zip.open('word/header1.xml'))
    except:
        self.header = None
    try:
        self.numberings = Numbering(zip.open('word/numbering.xml'))
    except:
        self.numberings = None
    zip.close()
def do_check(path):
    """Assert the IntelliJ project files under *path* register the
    generated source roots and a default annotation-processing profile.
    (Nested helper: relies on *self* from the enclosing test method.)"""
    # project.iml must list both generated source folders.
    iml_file = os.path.join(path, 'project.iml')
    iml_dom = minidom.parse(iml_file)
    found_paths = set()
    for sourceFolder in self._get_sourceFolders(iml_dom):
        url = sourceFolder.getAttribute('url')
        # Normalize the absolute url down to the trailing generated* part.
        source_path = re.sub(r'^.*/generated', 'generated', url)
        found_paths.add(source_path)
    self.assertIn("generated", found_paths)
    self.assertIn("generated_tests", found_paths)
    # project.ipr must carry an enabled default annotation-processing
    # profile pointing at the same directories.
    ipr_file = os.path.join(path, 'project.ipr')
    ipr_dom = minidom.parse(ipr_file)
    annotation_processing = self._get_compiler_configuration(ipr_dom).getElementsByTagName(
        'annotationProcessing')[0]
    profile = annotation_processing.getElementsByTagName('profile')[0]
    self.assertEquals('True', profile.getAttribute('enabled'))
    self.assertEquals('true', profile.getAttribute('default'))
    self.assertEquals('Default', profile.getAttribute('name'))
    processor_path = profile.getElementsByTagName('processorPath')[0]
    self.assertEquals('true', processor_path.getAttribute('useClasspath'))
    source_output_dir = profile.getElementsByTagName('sourceOutputDir')[0]
    self.assertEquals('../../../generated', source_output_dir.getAttribute('name'))
    source_test_output_dir = profile.getElementsByTagName('sourceTestOutputDir')[0]
    self.assertEquals('../../../generated_tests', source_test_output_dir.getAttribute('name'))
    # All three AutoValue processors must be configured.
    found_processors = set()
    for processor in profile.getElementsByTagName('processor'):
        found_processors.add(processor.getAttribute('name'))
    self.assertEquals({'com.google.auto.value.processor.AutoAnnotationProcessor',
                       'com.google.auto.value.processor.AutoValueBuilderProcessor',
                       'com.google.auto.value.processor.AutoValueProcessor'},
                      found_processors)
def CheckDate(self, context):
    """Verify the exported file's <asset><created> timestamp is the same
    as or later than the input file's.  Logs PASSED/FAILED via the
    context and returns True/False."""
    # Preconditions: no crashes and the expected steps ran.
    self.__assistant.CheckCrashes(context)
    self.__assistant.CheckSteps(context, ["Import", "Export", "Validate"], [])
    if not self.__assistant.GetResults():
        return False
    # Get the <created> time for the input file
    root = minidom.parse(context.GetInputFilename()).documentElement
    inputCreatedDate = ParseDate(GetXmlContent(FindXmlChild(root, "asset", "created")))
    if inputCreatedDate == None:
        context.Log("FAILED: Couldn't read <created> value from test input file.")
        return False
    # Get the output file
    outputFilenames = context.GetStepOutputFilenames("Export")
    if len(outputFilenames) == 0:
        context.Log("FAILED: There are no export steps.")
        return False
    # Get the <created> time for the output file
    root = minidom.parse(outputFilenames[0]).documentElement
    outputCreatedDate = ParseDate(GetXmlContent(FindXmlChild(root, "asset", "created")))
    if outputCreatedDate == None:
        context.Log("FAILED: Couldn't read <created> value from the exported file.")
        return False
    # The export must not predate the original.
    if (outputCreatedDate - inputCreatedDate) < timedelta(0):
        context.Log("FAILED: <created> has an incorrect time stamp. It should be later than the <created> value in the original file.")
        context.Log("The original <created> time is " + str(inputCreatedDate))
        context.Log("The exported <created> time is " + str(outputCreatedDate))
        return False
    context.Log("PASSED: <created> element is correct.")
    return True
def CheckDate(self, context):
    """Verify the exported node asset's <modified> timestamp is preserved
    or updated (same as or later than the input's).

    Returns True on pass, False on a bad timestamp, and None when a
    prerequisite could not be read (missing date or no export step).
    """
    # Get the <modified> time for the input file
    root = minidom.parse(context.GetInputFilename()).documentElement
    inputDate = ParseDate(GetXmlContent(FindXmlChild(root, "library_visual_scenes", "visual_scene", "node", "asset", "modified")))
    if inputDate == None:
        context.Log("FAILED: Couldn't read <modified> value from test input file.")
        return None
    # Get the output file
    outputFilenames = context.GetStepOutputFilenames("Export")
    if len(outputFilenames) == 0:
        context.Log("FAILED: There are no export steps.")
        return None
    # Get the <modified> time for the output file
    root = minidom.parse(outputFilenames[0]).documentElement
    outputDate = ParseDate(GetXmlContent(FindXmlChild(root, "library_visual_scenes", "visual_scene", "node", "asset", "modified")))
    if outputDate == None:
        context.Log("FAILED: Couldn't read <modified> value from the exported file.")
        return None
    # Modified data must be greater than or equal to original date to pass
    if (outputDate - inputDate) < timedelta(0):
        context.Log("FAILED: <modified> is not preserved.")
        context.Log("The original <modified> time is " + str(inputDate))
        context.Log("The exported <modified> time is " + str(outputDate))
        return False
    context.Log("PASSED: <modified> element is preserved or updated correctly.")
    return True
def inlineSchema(sourcePath,destinationPath): """Open sourcePath. For every included XSD, look for the document in the current directory, and inline it. Write result to destinationPath.""" doc=xd.parse(sourcePath) docElt=doc.documentElement if not (docElt.nodeType==xd.Node.ELEMENT_NODE and docElt.tagName=="xs:schema"): raise Exception, "file {0} doesn't start with an elt of name xs:schema".format(sourcePath) inlined=set() toInline=set() findIncludes(docElt.firstChild,inlined,toInline) while len(toInline): incl=toInline.pop() inclDoc=xd.parse(incl) iDElt=inclDoc.documentElement if not iDElt.nodeType==xd.Node.ELEMENT_NODE and iDElt.tagName=="xs:schema": raise Exception, "file {0} doesn't start with an elt of name xs:schema".format(incl) findIncludes(iDElt.firstChild,inlined,toInline) for child in iDElt.childNodes: docElt.appendChild(child.cloneNode(True)) inclDoc.unlink() inlined.add(incl) #print "added: {0}".format(incl) destFile=codecs.open(destinationPath,'w','utf-8') docElt.writexml(destFile) destFile.close()
def getHealthNews(lan='en',format='json'):
    """Fetch the MedlinePlus health-news RSS feed and serialize its items.

    lan    -- 'es' for the Spanish feed, anything else for English.
    format -- 'json' for {...},-style fragments, otherwise <record> markup.
    Returns the concatenated serialized string.
    """
    returnData = MutableString()
    returnData = ''  # NOTE: immediately rebound to a plain str
    if (lan == 'es'):
        dom = minidom.parse(urllib.urlopen(AppConfig.medlinePlusHealthNewsSpanishURL))
    else:
        dom = minidom.parse(urllib.urlopen(AppConfig.medlinePlusHealthNewsEnglishURL))
    rssTitle = MutableString()
    rssDescription = MutableString()
    rssURL = MutableString()
    for node in dom.getElementsByTagName('item'):
        # NOTE(review): fields are reset and emitted once per *child* of
        # each <item>, so each title/link/description becomes its own
        # fragment rather than one combined record -- confirm intended.
        for item_node in node.childNodes:
            rssTitle = ''
            rssDescription = ''
            rssURL = ''
            #item title
            if (item_node.nodeName == "title"):
                for text_node in item_node.childNodes:
                    if (text_node.nodeType == node.TEXT_NODE):
                        rssTitle += text_node.nodeValue
            #description
            if (item_node.nodeName == "description"):
                for text_node in item_node.childNodes:
                    rssDescription += text_node.nodeValue
            #link to URL
            if (item_node.nodeName == "link"):
                for text_node in item_node.childNodes:
                    rssURL += text_node.nodeValue
            if (format == 'json'):
                startTag = '{'
                endTag = '},'
                #cleanup
                #rssTitle = re.sub("\"", "'", rssTitle)
                rssTitle = re.sub("\n", "", rssTitle)
                rssTitle = re.sub("\"", "\\\"", rssTitle)
                rssDescription = re.sub("\"", "\\\"", rssDescription)
                rssDescription = re.sub("\n", "", rssDescription)
                rssDescription = re.sub("\t", " ", rssDescription)
                rssDescription = re.sub("\r", "", rssDescription)
                if (len(rssDescription) > 0):
                    # [:-1] strips the trailing separator the Formatter adds.
                    rssDescription = Formatter.data(format, 'description', escape(rssDescription))[:-1]
            else:
                startTag = '<record>'
                endTag = '</record>'
                if (len(rssDescription) > 0):
                    rssDescription = Formatter.data(format, 'description', escape(rssDescription))
            if (len(rssTitle) > 0):
                returnData += startTag + Formatter.data(format, 'title', rssTitle)
            if (len(rssURL) > 0):
                returnData += Formatter.data(format, 'url', rssURL)
            if (len(rssDescription) > 0 ):
                returnData += rssDescription + endTag
    return returnData
def test_output_compatible_setup_nooutput(self):
    """'run --silent' with xunit + json outputs must print nothing while
    still producing valid, parsable result files."""
    tmpfile = tempfile.mktemp()
    tmpfile2 = tempfile.mktemp()
    os.chdir(basedir)
    cmd_line = './scripts/avocado run --silent --xunit %s --json %s sleeptest' % (tmpfile, tmpfile2)
    result = process.run(cmd_line, ignore_status=True)
    output = result.stdout + result.stderr
    expected_rc = 0
    try:
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s"
                         % (expected_rc, result))
        self.assertEqual(output, "", "Output is not empty:\n%s" % output)
        # Check if we are producing valid outputs
        with open(tmpfile2, 'r') as fp:
            json_results = json.load(fp)
            debug_log = json_results['debuglog']
            self.check_output_files(debug_log)
        # xunit file must at least be well-formed XML.
        minidom.parse(tmpfile)
    finally:
        # Best-effort cleanup of the temp result files.
        try:
            os.remove(tmpfile)
            os.remove(tmpfile2)
        except OSError:
            pass
def __init__(self):
    """Load the global and user XML configuration files and cache their
    <config> root nodes.

    A missing or unparsable file leaves the corresponding root node as
    None (best-effort, as before).  The file handles are now closed via
    try/finally (they previously leaked), and the bare excepts were
    narrowed to Exception.
    """
    self.__global_root_node = None
    self.__user_root_node = None
    self.__testcase_root_node = None
    self.__global_config_filename = self.get_global_config_filename()
    self.__user_config_filename = self.get_user_config_filename()
    self.__testcase_config_filename = self.get_testcase_config_filename()
    try:
        file_handler_global = open(self.__global_config_filename, "r")
        try:
            xml_doc_global = minidom.parse(file_handler_global)
            self.__global_root_node = xml_doc_global.getElementsByTagName("config")[0]
        finally:
            file_handler_global.close()
    except Exception:
        self.__global_root_node = None
    try:
        file_handler_user = open(self.__user_config_filename, "r")
        try:
            xml_doc_user = minidom.parse(file_handler_user)
            self.__user_root_node = xml_doc_user.getElementsByTagName("config")[0]
        finally:
            file_handler_user.close()
    except Exception:
        self.__user_root_node = None
def checkStandards( self ):
    """Compare the computed photometric values against the DIN EN 13201-2
    ME- and S-class thresholds from Standard_classes.xml and append the
    resulting class names to the Evaluation.xml file."""
    print 'Check DIN EN 13201-2 classes...'
    checktree = parse( os.getcwd() + "/Standard_classes.xml" )
    # First ME class whose three luminance thresholds are all met wins;
    # falls back to 'None' when no row matches.
    for node in checktree.getElementsByTagName( 'ME-Class' ):
        Lm = node.getAttribute( 'Lm' )
        U0 = node.getAttribute( 'U0' )
        Ul = node.getAttribute( 'Ul' )
        if( float( Evaluator.meanLuminance ) >= float( Lm ) and
            float( Evaluator.uniformityOfLuminance ) >= float( U0 ) and
            float( Evaluator.lengthwiseUniformityOfLuminance ) >= float( Ul ) ):
            lumDIN = node.getAttribute( 'name' )
            break
        else:
            lumDIN = 'None'
            continue
    print ' ME-Class fullfillment: ' + str( lumDIN )
    # Record the ME class under the <Luminance> node of the evaluation file.
    evaltree = parse( self.xmlConfigPath + Evaluator.evalDirSuffix + '/Evaluation.xml' )
    child1 = evaltree.createElement( "ClassFullfillment" )
    child1.setAttribute( "class", lumDIN )
    node1 = evaltree.getElementsByTagName( 'Luminance')
    node1.item(0).appendChild( child1 )
    # Same pattern for the illuminance-based S classes.
    for node in checktree.getElementsByTagName( 'S-Class' ):
        Em = node.getAttribute( 'Em' )
        Emin = node.getAttribute( 'Emin' )
        g1 = node.getAttribute( 'g1' )
        if( float( Evaluator.meanIlluminance ) >= float( Em ) and
            float( Evaluator.minIlluminance ) >= float( Emin ) ):
            illumDIN = node.getAttribute( 'name' )
            break
        else:
            illumDIN = 'None'
            continue
    print ' S-Class fullfillment: ' + str( illumDIN )
    child2 = evaltree.createElement( "ClassFullfillment" )
    child2.setAttribute( "class", illumDIN )
    node2 = evaltree.getElementsByTagName( 'Illuminance')
    node2.item(0).appendChild( child2 )
    child3 = evaltree.createElement( "UniformityCriteria" )
    child3.setAttribute( "true", "Yes" )
    # NOTE(review): g1 is whatever the S-Class loop last assigned; this
    # raises NameError if there are no S-Class nodes -- confirm intended.
    if ( float( Evaluator.uniformityOfIlluminance ) < float( g1 ) ):
        print ' Uniformity criteria not fullfilled! '
        child3.setAttribute( "true", "No" )
    node3 = evaltree.getElementsByTagName( 'Illuminance')
    node3.item(0).appendChild( child3 )
    # Write the augmented evaluation tree back in place.
    f = open( self.xmlConfigPath + Evaluator.evalDirSuffix + '/Evaluation.xml', "w" )
    evaltree.writexml( f, "\n", " ")
    f.close( )
    print ' done ...'
    print ''
def multiappend_dom (sname, dname): dom1 = parse(sname) domr = parse(sname) dom2 = parse(dname) ref = "" child_append = "" lines_to_append = [] # busco el ref for node in dom2.getElementsByTagName('cora:decorator'): ref = node.getAttribute("ref") print ref # busco el append for node in dom2.getElementsByTagName('cora:append'): for child in node.childNodes: s_append = child xml_append = s_append.toxml() if '<' in xml_append: xml_append = xml_append.rstrip() lines_to_append.append(s_append) #Guardo en esta lista las lineas a anexar #Busco el index index_r = 0 for i in range(0, len(domr.childNodes)): index_ref = domr.childNodes[i] if ref in index_ref.toxml(): index_r = i break # Los anexo for i in range(0, len(lines_to_append)): domr.childNodes[index_r].appendChild(lines_to_append[i]) print domr.toxml()
def main():
    """Send an instance-creation notification e-mail: expand the localized
    MediaWiki subject/body templates via the wiki API, append the local
    hostname, and pipe the message through sendmail.

    Returns the exit status from closing the sendmail pipe (None on
    success, per os.popen/close semantics).
    """
    parser = OptionParser(conflict_handler="resolve")
    parser.set_usage("mail-instance-creator.py <from-email-address> <to-email-address> <languagecode> <wikiaddress>\n\n\texample: mail-instance-creator.py '*****@*****.**' 'es' 'http://example.com/w/'")
    (options, args) = parser.parse_args()
    if len(args) != 4:
        parser.error("mail-instance-creator.py expects exactly four arguments.")
    fromaddress = args[0]
    toaddress = args[1]
    lang = args[2]
    wikiaddress = args[3]
    # Expand the language-specific subject and body messages on the wiki.
    subjecturl = wikiaddress + 'api.php?action=expandtemplates&text={{msgnw:mediawiki:openstackmanager-email-subject/' + lang + '}}&format=xml'
    bodyurl = wikiaddress + 'api.php?action=expandtemplates&text={{msgnw:mediawiki:openstackmanager-email-body/' + lang + '}}&format=xml'
    dom = minidom.parse(urllib.urlopen(subjecturl))
    subject = dom.getElementsByTagName('expandtemplates')[0].firstChild.data
    dom = minidom.parse(urllib.urlopen(bodyurl))
    body = dom.getElementsByTagName('expandtemplates')[0].firstChild.data
    body = body + ' ' + gethostname()
    sendmail_location = "/usr/sbin/sendmail" # sendmail location
    p = os.popen("%s -t" % sendmail_location, "w")
    p.write("From: %s\n" % fromaddress)
    p.write("To: %s\n" % toaddress)
    p.write("Subject: %s\n" % subject)
    p.write("\n") # blank line separating headers from body
    p.write(body)
    status = p.close()
    return status
def main():
    """Send an instance-creation notification e-mail with the host's SSH
    key fingerprint appended: expand the localized MediaWiki subject/body
    templates via the wiki API and deliver through sendmail.

    Returns 1 when sendmail exits non-zero, otherwise None.
    """
    parser = OptionParser(conflict_handler="resolve")
    parser.set_usage("mail-instance-creator.py <from-email-address> <to-email-address> <languagecode> <wikiaddress>\n\n\texample: mail-instance-creator.py '*****@*****.**' 'es' 'http://example.com/w/'")
    (options, args) = parser.parse_args()
    if len(args) != 4:
        parser.error("mail-instance-creator.py expects exactly four arguments.")
    fromaddress = args[0]
    toaddress = args[1]
    lang = args[2]
    wikiaddress = args[3]
    # Expand the language-specific subject and body messages on the wiki.
    subjecturl = wikiaddress + 'api.php?action=expandtemplates&text={{msgnw:mediawiki:openstackmanager-email-subject/' + lang + '}}&format=xml'
    bodyurl = wikiaddress + 'api.php?action=expandtemplates&text={{msgnw:mediawiki:openstackmanager-email-body/' + lang + '}}&format=xml'
    dom = minidom.parse(urllib.urlopen(subjecturl))
    subject = dom.getElementsByTagName('expandtemplates')[0].firstChild.data
    dom = minidom.parse(urllib.urlopen(bodyurl))
    # Host key fingerprint: second whitespace-separated field of the
    # `ssh-keygen -l` output.
    p = subprocess.Popen("ssh-keygen -lf /etc/ssh/ssh_host_rsa_key.pub", shell=True, stdout=subprocess.PIPE)
    fingerprint = p.communicate()[0]
    fingerprint = fingerprint.split(' ')[1]
    body = dom.getElementsByTagName('expandtemplates')[0].firstChild.data
    body = body + ' ' + gethostname() + ' (' + fingerprint + ')'
    message = "From: %s\nTo: %s\nSubject: %s\n\n%s" % (fromaddress, toaddress, subject, body)
    p = subprocess.Popen("/usr/sbin/sendmail -t", shell=True, stdin=subprocess.PIPE)
    p.communicate(message)
    if p.wait() != 0:
        return 1
def _TestOutFile(self, test_name, expected_xml):
    """Run the gtest binary *test_name* with XML output directed at
    self.output_dir_ and verify the produced file is equivalent to
    *expected_xml* after normalization."""
    gtest_prog_path = gtest_test_utils.GetTestExecutablePath(test_name)
    command = [gtest_prog_path, "--gtest_output=xml:%s" % self.output_dir_]
    p = gtest_test_utils.Subprocess(command, working_dir=gtest_test_utils.GetTempDir())
    self.assert_(p.exited)
    self.assertEquals(0, p.exit_code)
    # libtool builds may prefix the binary name with 'lt-', which changes
    # the emitted XML file name as well; accept either variant.
    output_file_name1 = test_name + ".xml"
    output_file1 = os.path.join(self.output_dir_, output_file_name1)
    output_file_name2 = 'lt-' + output_file_name1
    output_file2 = os.path.join(self.output_dir_, output_file_name2)
    self.assert_(os.path.isfile(output_file1) or os.path.isfile(output_file2),
                 output_file1)
    expected = minidom.parseString(expected_xml)
    if os.path.isfile(output_file1):
        actual = minidom.parse(output_file1)
    else:
        actual = minidom.parse(output_file2)
    self.NormalizeXml(actual.documentElement)
    self.AssertEquivalentNodes(expected.documentElement, actual.documentElement)
    expected.unlink()
    actual.unlink()
def updateWithExistingCerts():
    """Carry the <ClientEndPoint> elements (MAD and GUID certificate
    details) from the currently installed EOC TF file over into the newly
    unpacked package's copy.

    Exits the process when no existing installation is found.
    (Runs under Jython: File.separator comes from java.io.File.)
    """
    logDebug(">>>updateWithExistingCerts", momHandlerLogger)
    baseDir = h2hprops.get(KYTARGETDIRPATTERN) + File.separator + h2hprops.get(KYCCIVERSION_SOFTLINK)
    logInfo("basedir = %s" % baseDir, momHandlerLogger)
    if not os.path.exists(baseDir):
        logError("No existing installation to get MAD and GUID certificate details", momHandlerLogger)
        sys.exit(1)
    currentEOCTFFIle = baseDir + File.separator + h2hprops.get(KYTARGETCONFIGDIR) + File.separator + EOCTFFILE
    newEOCTFFile = h2hprops.get(KYPACKAGE_LOCATION) + File.separator + h2hprops.get(KYTARGETCONFIGDIR) + File.separator + EOCTFFILE
    removeCM(newEOCTFFile)
    srcEocTFXML = parse(currentEOCTFFIle)
    dstEocTFXML = parse(newEOCTFFile)
    srcClientEndPointEl = srcEocTFXML.getElementsByTagName("ClientEndPoint")
    dstClientEndPointEl = dstEocTFXML.getElementsByTagName("ClientEndPoint")
    # Replace each destination ClientEndPoint with the corresponding
    # source one -- assumes both files list them in the same order and
    # count (TODO confirm).
    i=0
    for node in dstClientEndPointEl:
        parentNode = node.parentNode
        parentNode.removeChild(node)
        parentNode.appendChild(srcClientEndPointEl[i].cloneNode(True))
        i = i + 1
    # NOTE(review): the merged DOM is written back over the *current*
    # install's file (currentEOCTFFIle), not the new package copy --
    # confirm this is the intended target.
    dstEocFile = open(currentEOCTFFIle,"w")
    dstEocTFXML.writexml(dstEocFile)
    dstEocFile.close()
    logDebug("<<<updateWithExistingCerts", momHandlerLogger)
def __init__(self, filename, shortXMLfilename):
    """Load the XEP HTML table document from *filename* and the bots XML
    document from *shortXMLfilename*.  When either file cannot be parsed,
    an empty skeleton document is created in its place."""
    self.filename = filename
    self.shortXMLfilename = shortXMLfilename
    try:
        self.tableFile = parse(filename)
    except:
        # No usable table file: build a sortable HTML table skeleton
        # with the standard header row.
        impl = getDOMImplementation()
        self.tableFile = impl.createDocument(None, "table", None)
        self.tableFile.getElementsByTagName("table")[0].setAttribute("class", "sortable")
        self.tableFile.getElementsByTagName("table")[0].setAttribute("id", "xeplist")
        self.tableFile.getElementsByTagName("table")[0].setAttribute("cellspacing", "0")
        self.tableFile.getElementsByTagName("table")[0].setAttribute("cellpadding", "3")
        self.tableFile.getElementsByTagName("table")[0].setAttribute("border", "1")
        header = parseString(
            '''<tr class='xepheader'>
            <th align='left'>Number</th>
            <th align='left'>Name</th>
            <th align='left'>Type</th>
            <th align='left'>Status</th>
            <th align='left'>Date</th>
            </tr>''')
        self.tableFile.getElementsByTagName("table")[0].appendChild(header.getElementsByTagName("tr")[0])
    try:
        self.botsFile = parse(shortXMLfilename)
    except:
        # Missing bots file: start from an empty <xeps> document.
        impl = getDOMImplementation()
        self.botsFile = impl.createDocument(None, "xeps", None)
def doit(deepcopy=1): f = mk_foo(6) #xml_pickle.setDeepCopy(deepcopy) print "CREATE XML" t1 = time() fh = open('aaa.xml','w') x = xml_pickle.dump(f,fh) fh.close() print "TIME = %f"%(time()-t1) print "Pickle len = ",os.stat('aaa.xml')[ST_SIZE] print "minidom pure parse" t1 = time() fh = open('aaa.xml','r') minidom.parse(fh) fh.close() print "TIME = %f"%(time()-t1) print "xml_pickle load" t1 = time() fh = open('aaa.xml','r') #xml_pickle.setParser("DOM") # default, but just to be sure o = xml_pickle.load(fh) fh.close() print "TIME = %f"%(time()-t1) del o
def parseslidecontent(pptxfile, words, booknum, verbose=False):
    """Extract slide text and speaker notes from *pptxfile* into *words*.

    words   -- dict keyed "<booknum>:<page>"; mutated in place and returned.
    booknum -- book number used in the key prefix.
    Slides titled "Course Roadmap" are blanked and their notes skipped.
    Uses the module-global `paragraphtext`, which parse_node() fills in.
    """
    global paragraphtext
    skippages = []
    tmpd = tempfile.mkdtemp()
    zipfile.ZipFile(pptxfile).extractall(path=tmpd, pwd=None)
    # Parse slide content first
    path = tmpd + '/ppt/slides/'
    for infile in glob.glob(os.path.join(path, '*.xml')):
        #parse each XML notes file from the notes folder.
        dom = parse(infile)
        noteslist = dom.getElementsByTagName('a:t')
        # Slide number = the digits embedded in the file name.
        page = re.sub(r'\D', "", infile.split("/")[-1])
        text = ''
        for node in noteslist:
            xmlTag = node.toxml()
            xmlData = xmlTag.replace('<a:t>', '').replace('</a:t>', '')
            text += " " + xmlData
        # Convert to ascii to simplify
        text = text.encode('ascii', 'ignore')
        if "Course Roadmap" in text:
            if verbose:
                print "Skipping page %d:%s, \"Course Roadmap\" slide."%(booknum,page)
            skippages.append(page)
            words[str(booknum) + ":" + page] = ''
        else:
            words[str(booknum) + ":" + page] = text
    # Next, parse notes content, skipping pages previously identified
    path = tmpd + '/ppt/notesSlides/'
    for infile in glob.glob(os.path.join(path, '*.xml')):
        # Get the slide number
        page = re.sub(r'\D', "", infile.split("/")[-1])
        if page in skippages:
            # Skip this page previously identified with "Course Roadmap" title text
            continue
        # Parse slide notes, adding a space after each paragraph marker, and removing XML markup
        dom = parse(infile)
        paragraphs=dom.getElementsByTagName('a:p')
        for paragraph in paragraphs:
            # parse_node() accumulates into the global paragraphtext.
            paragraphtext=""
            parse_node(paragraph)
            words[str(booknum) + ":" + str(page)] += " " + paragraphtext
    # Remove all the files created with unzip
    shutil.rmtree(tmpd)
    # Remove double-spaces which happens in the content occasionally;
    # also strip commas/parens and sentence-ending periods.
    for page in words:
        words[page] = ''.join(ch for ch in words[page] if ch not in set([',','(',')']))
        words[page] = re.sub('\. ', " ", words[page])
        words[page] = ' '.join(words[page].split())
    return words
def disableRG(sessionID, gpon_type, gpon_fsan, ont): target_url = str(config.protocol)+'://'+str(config.host)+':'+str(config.port) +str(config.extension) xml_request = """ <soapenv:Envelope xmlns:soapenv="http://www.w3.org/2003/05/soap-envelope"> <soapenv:Body> <rpc message-id="1" nodename="%s" username="******" sessionid="%s"> <get> <filter type="subtree"> <top> <object> <type>Ont</type> <id> <ont>%s</ont> </id> </object> </top> </filter> </get> </rpc> </soapenv:Body> </soapenv:Envelope> """ % (config.nodename, config.username, sessionID, ont) request = urllib2.Request(target_url, xml_request) request.add_header('Content-Type','text/plain;charset=UTF-8') resultRead = urllib2.urlopen(request).read() #uncommet these to print debug info result = urllib2.urlopen(request) print parse( result ).toprettyxml() result.close()
def musicSearch():
    """Walk a Plex music section and download wanted tracks.

    Reads artist names (one per line) from the module-global `musicfile`,
    then walks the Plex library section `musicid` via its XML API:
    artists -> albums ("Directory" nodes) -> tracks ("Track" nodes),
    handing each matching track to `songDownloader`.

    NOTE(review): driven entirely by module globals — `musicfile`, `url`,
    `musicid`, `myplexstatus`, `plextoken`, `musicsync` — confirm they
    are set before this is called.
    """
    musicopen = open(musicfile,"r")
    musicread = musicopen.read()
    musiclist= musicread.split("\n")
    musicopen.close()
    # split("\n") leaves a trailing empty entry, hence the -1.
    print str(len(musiclist)-1) + " Artists Found in Your Wanted List..."
    # Append the Plex token only when myplex auth is enabled.
    if myplexstatus=="enable":
        musichttp=url+"/library/sections/"+musicid+"/all"+"?X-Plex-Token="+plextoken
    else:
        musichttp=url+"/library/sections/"+musicid+"/all"
    website = urllib.urlopen(musichttp)
    xmldoc = minidom.parse(website)
    #Get list of artists
    itemlist = xmldoc.getElementsByTagName('Directory')
    print str(len(itemlist)) + " Total Artists Found"
    for item in itemlist:
        musictitle = item.attributes['title'].value
        # Normalize the title: strip non-ASCII, spell out '&' so it can
        # match the plain-text wanted list.
        musictitle = re.sub(r'[^\x00-\x7F]+',' ', musictitle)
        musictitle = re.sub(r'\&','and', musictitle)
        musickey = item.attributes['key'].value
        # musicsync=="enable" means download everything regardless of list.
        if (musictitle in musiclist) or (musicsync=="enable"):
            if myplexstatus=="enable":
                cdhttp=url+musickey+"?X-Plex-Token="+plextoken
            else:
                cdhttp=url+musickey
            cdweb=urllib.urlopen(cdhttp)
            xmlcd=minidom.parse(cdweb)
            #get List of CDs
            cdlist=xmlcd.getElementsByTagName('Directory')
            for cd in cdlist:
                cdtitle = cd.attributes['title'].value
                # Skip Plex's synthetic "All tracks" aggregate entry.
                if (cdtitle != "All tracks"):
                    cdkey = cd.attributes['key'].value
                    if myplexstatus=="enable":
                        songhttp=url+cdkey+"?X-Plex-Token="+plextoken
                    else:
                        songhttp=url+cdkey
                    songweb=urllib.urlopen(songhttp)
                    xmlsong=minidom.parse(songweb)
                    #Get List of Songs
                    songlist=xmlsong.getElementsByTagName('Track')
                    for song in songlist:
                        songtitle = song.attributes['title'].value
                        songrating = song.attributes['ratingKey'].value
                        # Untitled tracks fall back to their ratingKey.
                        if songtitle=="":
                            songtitle = songrating
                        # The first <Part> holds the media file's key/container.
                        partindex = song.getElementsByTagName('Part')
                        songfile = partindex[0].attributes['key'].value
                        songcontainer = partindex[0].attributes['container'].value
                        if myplexstatus=="enable":
                            songlink=url+songfile+"?X-Plex-Token="+plextoken
                        else:
                            songlink=url+songfile
                        songDownloader(musictitle,cdtitle,songtitle,songlink,songcontainer)
                else:
                    print "Skipping all leaves."
        else:
            print musictitle + " Not in Wanted List"
def test_output_compatible_setup_3(self):
    """Run avocado with xunit, json and html outputs simultaneously and
    check all three are produced and well-formed (json parses, xunit is
    valid XML, and the html dir contains its resource dirs).

    Fix vs. original: the finally block ran all three cleanups inside a
    single try/except OSError, so the first failing removal aborted the
    remaining ones and leaked temp files/dirs. Each cleanup now runs
    independently.
    """
    tmpfile = tempfile.mktemp(prefix='avocado_' + __name__)
    tmpfile2 = tempfile.mktemp(prefix='avocado_' + __name__)
    tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
    tmpfile3 = tempfile.mktemp(dir=tmpdir)
    os.chdir(basedir)
    cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                '--xunit %s --json %s --html %s passtest.py'
                % (self.tmpdir, tmpfile, tmpfile2, tmpfile3))
    result = process.run(cmd_line, ignore_status=True)
    output = result.stdout + result.stderr
    expected_rc = exit_codes.AVOCADO_ALL_OK
    # The html report creates 5 resource dirs alongside tmpfile3.
    tmpdir_contents = os.listdir(tmpdir)
    self.assertEqual(len(tmpdir_contents), 5,
                     'Not all resources dir were created: %s'
                     % tmpdir_contents)
    try:
        self.assertEqual(result.exit_status, expected_rc,
                         "Avocado did not return rc %d:\n%s"
                         % (expected_rc, result))
        self.assertNotEqual(output, "", "Output is empty")
        # Check if we are producing valid outputs
        with open(tmpfile2, 'r') as fp:
            json_results = json.load(fp)
            debug_log = json_results['debuglog']
            self.check_output_files(debug_log)
        # Raises if the xunit file is not valid XML.
        minidom.parse(tmpfile)
    finally:
        # Clean up each artifact independently so one failure does not
        # prevent the removal of the others.
        for cleanup in (lambda: os.remove(tmpfile),
                        lambda: os.remove(tmpfile2),
                        lambda: shutil.rmtree(tmpdir)):
            try:
                cleanup()
            except OSError:
                pass
def deserialize(self, source, prefixes=None, strict=False, as_attribute=None, as_list=None, as_attribute_of_element=None):
    """Build a bridge.Document from *source*.

    *source* may be a filesystem path, a raw XML string, or a file-like
    object with a ``read`` method; anything else yields no DOM and the
    fragment deserializer is invoked with None. The parsed DOM is
    unlinked afterwards to free its cycles. Returns the new Document.
    """
    # Resolve the input into a DOM document.
    if isinstance(source, basestring):
        # A string is either a path on disk or inline XML text.
        doc = xdm.parse(source) if os.path.exists(source) else xdm.parseString(source)
    elif hasattr(source, 'read'):
        doc = xdm.parse(source)
    else:
        doc = None

    document = bridge.Document()
    document.as_attribute = as_attribute or {}
    document.as_list = as_list or {}
    document.as_attribute_of_element = as_attribute_of_element or {}

    self.__deserialize_fragment(doc, document)

    if doc:
        # Break DOM reference cycles; unlink can raise KeyError on some
        # implementations, which is harmless here.
        try:
            doc.unlink()
        except KeyError:
            pass
    return document
def run(self, input_file, input_directory):
    """Prepare subtitle replace/rollback files for each media id listed in *input_file*.

    For every media id (one per line) the method:
      1. reads "<host>,<basepath>" from <dir>/<id>/mpd_info.txt;
      2. parses <dir>/<id>/manifest.mpd and looks for text-type
         AdaptationSet entries (subtitles);
      3. copies the fixed local <lang>.vtt into a replace/ staging dir and
         downloads the currently-deployed vtt into a rollback/ staging dir;
      4. appends bookkeeping rows to mpd_replace.txt / mpd_rollback.txt
         via self.file_manager.out.

    Always returns True; every step is logged through self.file_manager.
    NOTE(review): the STORE log line prints input_file/input_directory
    where it seemingly meant host/basepath — confirm intent.
    """
    self.file_manager.log("FUNCTION run() args -> input_file:" + input_file + ", input_directory:" + input_directory)
    with open(input_file, "r") as f:
        self.file_manager.log("OPEN input_file:" + input_file)
        for line in f:
            media_id = str(line).replace('\n', '')
            self.file_manager.log("READLN media_id:" + media_id)
            if media_id:
                local_input_directory = input_directory + media_id + "/"
                manifest_extension = "mpd"
                local_media_dash = local_input_directory + "manifest." + manifest_extension
                # replace_list = local_media_directory + manifest_extension + "_replace.txt"
                # rollback_list = local_media_directory + manifest_extension + "_rollback.txt"
                remote_dash_storage_host = ""
                remote_dash_basepath = ""
                manifest_info_file = local_input_directory + manifest_extension + "_info.txt"
                # The info file holds "<storage host>,<base path>" per line;
                # the last line read wins.
                with open(manifest_info_file, "r") as dash_basepath_file:
                    self.file_manager.log("OPEN [" + media_id + "] manifest_info_file:" + manifest_info_file)
                    for dash_basepath_line in dash_basepath_file:
                        self.file_manager.log("READLN [" + media_id + "] dash_basepath_line:" + dash_basepath_line)
                        dash_basepath_line = str(
                            dash_basepath_line).replace('\n', '')
                        remote_dash_storage_host = dash_basepath_line.split(
                            ",")[0]
                        remote_dash_basepath = dash_basepath_line.split(
                            ",")[1]
                        self.file_manager.log(
                            "STORE [" + media_id + "] " +
                            "remote_dash_storage_host:" + input_file +
                            ", remote_dash_storage_host:" + input_directory)
                # Number of text-type AdaptationSets found in the manifest.
                count_match = 0
                if os.path.exists(local_media_dash):
                    self.file_manager.log("CONTINUE [" + media_id + "] (exists) local_media_dash:" + local_media_dash)
                    manifest_dom = minidom.parse(local_media_dash)
                    for docAdaptationSet in manifest_dom.getElementsByTagName(
                            'AdaptationSet'):
                        if docAdaptationSet.attributes['contentType']:
                            if docAdaptationSet.attributes[
                                    'contentType'].value == 'text':
                                count_match += 1
                                self.file_manager.log(
                                    "AdaptationSet contentType is text")
                                subtitle_language = docAdaptationSet.attributes[
                                    'lang'].value  #lang="zh-Hans"
                                # The subtitle file URI lives in
                                # Representation/BaseURL under this set.
                                docRepresentation = self.getNodeByName(
                                    docAdaptationSet, "Representation")
                                docBaseURL = self.getNodeByName(
                                    docRepresentation, "BaseURL")
                                subtitle_uri = ""
                                if docBaseURL:
                                    subtitle_uri = self.getNodeText(
                                        docBaseURL)
                                if subtitle_language and subtitle_uri:
                                    self.file_manager.log(
                                        "PROCESS [" + media_id + "]" +
                                        " subtitle_language: " + subtitle_language +
                                        ", subtitle_uri: " + subtitle_uri)
                                    local_replace_sourcepath = "replace/" + media_id + "/"
                                    local_rollback_sourcepath = "rollback/" + media_id + "/"
                                    self.file_manager.validate_dir(
                                        self.file_manager.out_path,
                                        local_replace_sourcepath
                                    )  # ensure folder exists
                                    self.file_manager.validate_dir(
                                        self.file_manager.out_path,
                                        local_rollback_sourcepath
                                    )  # ensure folder exists
                                    local_replace_sourcepath = self.file_manager.out_path + "/" + local_replace_sourcepath
                                    local_rollback_sourcepath = self.file_manager.out_path + "/" + local_rollback_sourcepath
                                    # Copy vtt from DownloadStorageFiles and rename
                                    fixed_subtitle_input = local_input_directory + subtitle_language + ".vtt"
                                    local_replace_sourcefile = local_replace_sourcepath + subtitle_uri
                                    self.file_manager.log(
                                        "COPY START [" + media_id + "] Replace preparation " +
                                        " fixed_subtitle_input: " + fixed_subtitle_input +
                                        ", local_replace_sourcefile: " + local_replace_sourcefile)
                                    copyfile(fixed_subtitle_input,
                                             local_replace_sourcefile)
                                    self.file_manager.log(
                                        "COPY END [" + media_id + "] Replace preparation " +
                                        " fixed_subtitle_input: " + fixed_subtitle_input +
                                        ", local_replace_sourcefile: " + local_replace_sourcefile)
                                    # Download remote vtt file
                                    remote_location = remote_dash_basepath + subtitle_uri
                                    local_rollback_sourcefile = local_rollback_sourcepath + subtitle_uri
                                    self.file_manager.log(
                                        "COPY START [" + media_id + "] Rollback preparation " +
                                        " remote_location: " + remote_location +
                                        ", local_rollback_sourcefile: " + local_rollback_sourcefile)
                                    # Only record replace/rollback rows when the
                                    # currently-deployed vtt was backed up OK.
                                    if self.download(
                                            remote_location,
                                            local_rollback_sourcefile,
                                            remote_dash_storage_host):
                                        self.file_manager.out(
                                            media_id + "," +
                                            local_replace_sourcefile + "," +
                                            remote_dash_storage_host + "," +
                                            remote_location,
                                            manifest_extension + "_replace.txt")
                                        self.file_manager.out(
                                            media_id + "," +
                                            local_rollback_sourcefile + "," +
                                            remote_dash_storage_host + "," +
                                            remote_location,
                                            manifest_extension + "_rollback.txt")
                                    else:
                                        self.file_manager.log(
                                            "FAILED [" + media_id + "]" +
                                            " to download source vtt for backup"
                                        )
                                    self.file_manager.log(
                                        "COPY END [" + media_id + "] Rollback preparation " +
                                        " remote_location: " + remote_location +
                                        ", local_rollback_sourcefile: " + local_rollback_sourcefile)
                                else:
                                    self.file_manager.log(
                                        "SKIP [" + media_id + "] Subtitle has empty values" +
                                        " subtitle_language: " + subtitle_language +
                                        ", subtitle_uri: " + subtitle_uri)
                else:
                    self.file_manager.log(
                        "SKIP [" + media_id +
                        "] (do not exist) local_media_dash:" +
                        local_media_dash)
                if count_match == 0:
                    self.file_manager.log(
                        "SKIP [" + media_id +
                        "] Manifest doesn't have subtitles, mediaId: " +
                        media_id)
    self.file_manager.log("--END -- :" + input_file)
    return True
#!/usr/bin/env python # coding:utf-8 # 读取svg的xml文件 from xml.dom import minidom # 解析svg路径 from svg.path import parse_path import numpy as np import cv2 as cv import random # 1.读取xml文件 doc = minidom.parse('svg/output.svg') #doc = minidom.parse('svg/1.svg') # 2.查找path标签 paths = doc.getElementsByTagName('path') svgTag = doc.getElementsByTagName('svg') w = svgTag[0].getAttribute('width') h = svgTag[0].getAttribute('height') print(type(w), h) ##获取位移与缩放比例 gs = doc.getElementsByTagName('g') translates_scales = gs[0].getAttribute('transform').split(" ") if len(translates_scales) > 1: translates, scales = translates_scales print(translates.find("(")) print(scales.find("(")) #位移 translate = translates[translates.find("(") + 1:len(translates) - 1].split(',') #缩放比例
# Print the env_module names declared in a SALOME application's
# .config_appli_template.xml, space-separated on one line.
# Usage: <prog> <salome_root_directory>
n_args = len(sys.argv) - 1
if n_args != 1:
    # Fix: the original used "%(prog)" without the 's' conversion, which
    # raises "ValueError: unsupported format character '<'" instead of
    # printing the usage message.
    print("Usage: %(prog)s <salome_root_directory>" % {'prog': sys.argv[0]})
    sys.exit(1)

salome_root_dir = sys.argv[1]
root = None
path = os.path.join(salome_root_dir, '.config_appli_template.xml')
if not os.path.isfile(path):
    raise Exception('XML file: ' + path + ' not found')

doc = minidom.parse(path)
root = doc.documentElement

# Collect the <env_module> children of <env_modules>, if present.
nodeList = []
env_modules_node = getChildNode(root, 'env_modules')
if env_modules_node is not None:
    nodeList = childNodeList(env_modules_node, 'env_module')

# Emit the module names space-separated.
names = " ".join(str(node.getAttribute('name')) for node in nodeList)
print(names)