def ui_magic(object, ui_file, prefix): main_ui_filename = ui_file object.xml = gtk.Builder () object.xml.add_from_file (main_ui_filename) objects = object.xml.get_objects() for content in objects: try: if isinstance(content, gtk.Label): if content.get_label() != None and len(content.get_label()) > 0 : content.set_markup(_(content.get_label())) elif isinstance(content, gtk.Button): if content.get_label() != None and len(content.get_label()) > 0 : content.set_label(_(content.get_label())) else: if content.get_text() != None and len(content.get_text()) > 0 : content.set_text(_(content.get_text())) except AttributeError: pass # This is a workarround. For some reason obj.get_name don't return # the real name of the widget from xml.etree.ElementTree import ElementTree xml = ElementTree() xml.parse(main_ui_filename) for obj in xml.findall ('.//object'): try: if obj.attrib["id"].startswith(prefix) : widget = object.xml.get_object(obj.attrib["id"]) widget_name = obj.attrib["id"][len(prefix)+1:] exec ('object.%s = widget' % widget_name) except: print "Something fails at ui_magic"
def add_text_layer(self, pdf, hocrfile, page_num, height, dpi):
    """Draw an invisible text layer for OCR data.

    Reads word bounding boxes and line baselines from *hocrfile* (hOCR
    XML) and draws each recognized word invisibly onto *pdf* at the
    matching position, making the page selectable/searchable.

    :param pdf: reportlab canvas; must have the 'invisible' font registered
    :param hocrfile: path to the hOCR file produced by the OCR engine
    :param page_num: page number to select from the hOCR document
    :param height: page height in points, used to flip the y axis
    :param dpi: resolution of the scanned image, for pixel->point scaling
    """
    p1 = re.compile('bbox((\s+\d+){4})')
    p2 = re.compile('baseline((\s+[\d\.\-]+){2})')
    hocr = ElementTree()
    hocr.parse(hocrfile)
    logging.debug(xml.etree.ElementTree.tostring(hocr.getroot()))
    for c in hocr.getroot():  # Find the <body> tag
        if c.tag != 'body':
            continue
        for page in c:  # Each child in the body is a page tag
            if page.attrib['class'] != "ocr_page":
                # Fix: the original `assert ("...")` asserted a non-empty
                # string literal, which is always true and so did nothing.
                logging.warning(
                    "Why is this hocr not paging properly?? (class=%s)",
                    page.attrib['class'])
            if page.attrib['id'] == 'page_%d' % (page_num):
                break
        for line in page.findall(".//{http://www.w3.org/1999/xhtml}span"):
            if line.attrib['class'] != 'ocr_line':
                continue
            linebox = p1.search(line.attrib['title']).group(1).split()
            try:
                baseline = p2.search(line.attrib['title']).group(1).split()
            except AttributeError:
                # No baseline recorded for this line: assume horizontal.
                baseline = [0, 0]
            linebox = [float(i) for i in linebox]
            baseline = [float(i) for i in baseline]
            for word in line:
                if word.attrib['class'] != 'ocrx_word':
                    continue
                # The word text may live in nested children (e.g. <em>);
                # flatten all of it onto the word element.
                word_text = []
                for child in word.iter():
                    if child.text:
                        word_text.append(child.text)
                word.text = ' '.join(word_text)
                logging.debug(word.text)
                if word.text is None:
                    continue
                font_width = pdf.stringWidth(word.text.strip(), 'invisible', 8)
                if font_width <= 0:
                    continue
                box = p1.search(word.attrib['title']).group(1).split()
                box = [float(i) for i in box]
                # Vertical position: evaluate the line's baseline polynomial
                # at the word's horizontal center.
                b = self.polyval(baseline,
                                 (box[0] + box[2]) / 2 - linebox[0]) + linebox[3]
                text = pdf.beginText()
                text.setTextRenderMode(3)  # double invisible
                text.setFont('invisible', 8)
                text.setTextOrigin(box[0] * 72 / dpi, height - b * 72 / dpi)
                # Stretch the text horizontally to exactly fill the word box.
                box_width = (box[2] - box[0]) * 72 / dpi
                text.setHorizScale(100.0 * box_width / font_width)
                text.textLine(word.text.strip())
                pdf.drawText(text)
def qrcode_render(content, size, padding, version, em, ec, newline, parent):
    """Generate a QR code via the web service and draw its modules onto
    *parent*."""
    if newline:
        # Turn literal escape sequences in the input into real line breaks.
        content = content.replace("\\n", "\n").replace("\\r", "\r")
    # Generate QR Code - call web service
    qrcode = qrcode_generate(content, size, padding, version, em, ec)
    # Parse the returned SVG and draw every module rect onto the workspace.
    svg_tree = ElementTree()
    svg_tree.parse(StringIO(qrcode))
    svg_root = svg_tree.getroot()
    ns = "{http://www.w3.org/2000/svg}"
    for module in list(svg_root.getiterator(ns + "rect")):
        # e.g. <rect x="32" y="32" width="8" height="8" style="fill:rgb(0,0,0);" />
        attrs = module.attrib
        qrcode_draw_module((attrs["width"], attrs["height"]),
                           (attrs["x"], attrs["y"]),
                           attrs["style"],
                           parent)
def get_feed(param):
    '''
    Returns a news feed (usage: #news bbc -> bbc news feed).
    Get available feeds with #news (without parameters).
    '''
    keys = NEWS_FEEDS.keys()
    if param not in keys:
        return 'Feed not in list. Loaded feeds: %s' % ', '.join(keys)
    try:
        handle = urllib2.urlopen(NEWS_FEEDS[param])
    except urllib2.HTTPError:
        return 'Did not find feed.'
    rss = ElementTree()
    try:
        rss.parse(handle)
    except ExpatError:
        return 'Malformed XML. This feed sucks! (Clean ATOM or RSS required!)'
    finally:
        # Close the connection even when parsing fails (the original
        # leaked the handle on the ExpatError path).
        handle.close()
    title = rss.find('channel/title')
    if title is None:
        return 'Did not find any matching tags.'
    # Collect at most NEWS_RESULTS "title: short-url" lines.
    items = ''
    counter = 0
    for item in rss.getiterator('item'):
        if counter >= NEWS_RESULTS:
            break
        counter += 1
        items += '%s: %s\n' % (item.find('title').text.strip(),
                               shorten_url(item.find('link').text))
    # items[:-1] drops the trailing newline.
    output = '[%s]\n%s' % (title.text.strip(), items[:-1])
    return output
def _get_build_metadata(self, dir_name): if os.path.exists(os.path.join(dir_name, 'jenkins_build.tar.gz')): raise Skip('the build has already been archived', dir_name) # Read the build.xml metadata file that Jenkins generates build_metadata = os.path.join(dir_name, 'build.xml') if not os.access(build_metadata, os.R_OK): self.log.debug("Can't read build file at %s" % (build_metadata)) raise Exception("Can't access build.xml at %s" % (build_metadata)) else: tree = ElementTree() tree.parse(build_metadata) keys = ['result', 'number', 'duration'] kv_pairs = ((k, tree.find(k)) for k in keys) d = dict([(k, v.text) for k, v in kv_pairs if v is not None]) try: d['branch'] = tree.find('actions') \ .find('hudson.plugins.git.util.BuildData') \ .find('buildsByBranchName') \ .find('entry') \ .find('hudson.plugins.git.util.Build') \ .find('revision') \ .find('branches') \ .find('hudson.plugins.git.Branch') \ .find('name') \ .text except Exception: pass return d
def __init__(self, xmlfile = None):
    # Parse a numbering-plan XML file: record its serial and date, then
    # index the group and registry number ranges for prefix lookups.
    # NOTE(review): the xmlfile=None default is unusable -- tree.parse(None)
    # will fail; confirm callers always pass a real path.
    tree = ElementTree()
    tree.parse(xmlfile)
    root = tree.getroot()
    # Positional layout assumed from the indexing below (TODO: confirm
    # against the schema): root[1]=serial, root[2]=date string,
    # root[3]=UCC groups, root[4]=group registry.
    self._serial = root[1].text
    self._sdate = root[2].text
    self._tdate = email.utils.parsedate(self._sdate)
    uccgrp = root[3]
    grpreg = root[4]
    # NOTE(review): self._range_grp / self._range_reg are written here but
    # never created in this method -- presumably class-level dicts; if so,
    # they are shared across all instances. Verify.
    for ucc in uccgrp:
        prefix = ucc[0].text
        for grp in ucc[2]:
            length = int(grp[1].text)
            start, end = grp[0].text.split('-')
            # Store the range length under both endpoints of each span.
            self._range_grp[prefix + start] = length
            self._range_grp[prefix + end] = length
    for grp in grpreg:
        # Registry prefixes drop the dashes before being used as keys.
        prefix = grp[0].text.replace('-','')
        for reg in grp[2]:
            length = int(reg[1].text)
            start, end = reg[0].text.split('-')
            self._range_reg[prefix + start] = length
            self._range_reg[prefix + end] = length
def read_config(config_file):
    """Parse the guacamole server config XML and return a populated
    GuacamoleServerLoad (with its client list attached)."""
    tree = ElementTree()
    tree.parse(config_file)
    root = tree.getroot()
    server_name = root.attrib['name']
    server_vm = root.attrib['virtual_machine']
    now = datetime.now()
    # Counter slot per protocol name; anything unrecognised lands in slot 3.
    slot_by_protocol = {'vnc': 0, 'vnc-read-only': 1, 'ssh': 2}
    counts = [0, 0, 0, 0]
    client_infos = []
    for protocol in root.getchildren():
        protocol_name = protocol.attrib['name']
        num_clients = 0
        for client in protocol.getchildren():
            num_clients += 1
            client_infos.append(GuacamoleClientInfo(
                '', '', server_name, client.attrib['name'], protocol_name,
                client[0].text, client[1].text, 0, now))
        counts[slot_by_protocol.get(protocol_name, 3)] = num_clients
    load = GuacamoleServerLoad(server_name, server_vm,
                               counts[0], counts[1], counts[2], counts[3],
                               sum(counts), now, 0)
    load.guacamole_client_info = client_infos
    return load
def patch(pom_file, version):
    '''Updates the version in a POM file. We need to locate
    //project/parent/version, //project/version and
    //project/properties/project-version and replace the contents of
    these with the new version.

    Returns True when the file was modified and rewritten, False when no
    -SNAPSHOT version tag needed replacing.'''
    if settings['verbose']:
        prettyprint("Patching %s" % pom_file, Levels.DEBUG)
    tree = ElementTree()
    tree.parse(pom_file)
    need_to_write = False
    tags = [
        get_parent_version_tag(tree),
        get_project_version_tag(tree),
        get_properties_version_tag(tree),
    ]
    for tag in tags:
        # Identity comparison with None (PEP 8) instead of '!='.
        if tag is not None and "-SNAPSHOT" in tag.text:
            if settings['verbose']:
                prettyprint("%s is %s. Setting to %s" % (str(tag), tag.text, version),
                            Levels.DEBUG)
            tag.text = version
            need_to_write = True
    if need_to_write:
        # write to file again!
        write_pom(tree, pom_file)
        return True
    if settings['verbose']:
        prettyprint("File doesn't need updating; nothing replaced!", Levels.DEBUG)
    return False
def testcase_XML(webappName):
    """Append an auto install/launch/uninstall <testcase> for *webappName*
    to every <set> in tests.result.xml, then attach a description and
    script entry to the last one and save the file."""
    result_path = const.path + "/tests.result.xml"
    tree = ElementTree()
    tree.parse(result_path)
    root = tree.getroot()
    testcase_attrs = {
        "component": "core",
        "purpose": "Check if Packaged Web Application can be installed/launch/uninstall successfully",
        "execution_type": "auto",
        "id": webappName,
    }
    for set_node in root.getiterator("set"):
        SubElement(set_node, "testcase", testcase_attrs)
    # Describe only the last testcase that was just appended.
    last_case = root.getiterator("testcase")[-1]
    SubElement(last_case, "description")
    description = last_case[0]
    SubElement(description, "test_script_entry",
               {"test_script_expected_result": "0"})
    entry = root.getiterator("test_script_entry")[-1]
    entry.text = (
        "app_user@/opt/usr/media/tct/opt/wrt-manifest-tizen-tests/appinstall.sh "
        + webappName + "." + Pack_Type
    )
    tree.write(result_path)
def result_manifest_XML(result_manifest_xml_file_path,webappFile,auto_Result,manifest_cont):
    """Record a manifest test result into result/<webappFile>.

    Renames the first <set> to the current test flag, then either updates
    the existing <testcase>'s <auto_result> or creates a new <testcase>
    with <auto_result> and <testcommand> children, and saves the tree in
    place. All errors are caught and printed, never raised.
    """
    try:
        tree = ElementTree()
        tree.parse(result_manifest_xml_file_path + "/result/" + webappFile)
        root = tree.getroot()
        rset = root.getchildren()
        for mset in rset:
            testcase = mset.findall("set")
            # Tag the first <set> with the module-level Test_Flag.
            testcase[0].set("name",Test_Flag)
            for mtestcase in testcase:
                cnode = mtestcase.getiterator("testcase")
                if (len(cnode)==1):
                    # Existing testcase: just overwrite its result text.
                    # NOTE(review): this searches from the document root,
                    # not from mtestcase -- confirm that is intentional.
                    auto_result = root.getiterator("auto_result")
                    #auto_result = cnode.getiterator("auto_result")
                    auto_result[0].text = auto_Result
                else:
                    if (len(cnode)==0):
                        # No testcase yet: create it plus its result and
                        # command children, then fill both in.
                        SubElement(mtestcase,"testcase", {'component':'Runtime Core','purpose':'Check if packaged web application can be installed/launched/uninstalled successfully','execution_type' : 'auto', 'id' : webappFile.split(".")[0]})
                        result_node = mtestcase.find("testcase")
                        SubElement(result_node,"auto_result")
                        SubElement(result_node,"testcommand")
                        auto_node = result_node.find("auto_result")
                        auto_node.text = auto_Result
                        testcommand_node = result_node.find("testcommand")
                        # manifest_cont is expected to be a byte string.
                        testcommand_node.text = manifest_cont.decode("utf-8")
        tree.write(result_manifest_xml_file_path + "/result/" + webappFile)
    except Exception,e:
        print Exception,"Generate manifest.xml error:",e
def prepareLCSIMFile(inputlcsim, outputlcsim, numberofevents, trackingstrategy, inputslcio,
                     jars = None, cachedir = None, outputFile = None, outputRECFile = None,
                     outputDSTFile = None, debug = False):
  """Writes out a lcsim file for LCSIM

  Takes the parameters passed from :mod:`~ILCDIRAC.Workflow.Modules.LCSIMAnalysis`

  :param string inputlcsim: name of the provided lcsim
  :param string outputlcsim: name of the lcsim file on which LCSIM is going to run, defined in :mod:`~ILCDIRAC.Workflow.Modules.LCSIMAnalysis`
  :param int numberofevents: Number of events to process
  :param string trackingstrategy: trackingstrategy file to use, can be empty
  :param inputslcio: list of slcio files on which LCSIM should run
  :type inputslcio: list of strings
  :param jars: list of jar files that should be added in the classpath definition
  :type jars: list of strings
  :param string cachedir: folder that holds the cache directory, instead of Home
  :param string outputFile: File name of the output
  :param string outputDSTFile: filename of the DST file
  :param string outputRECFile: filename of the REC file
  :param bool debug: By default set verbosity to true
  :return: S_OK(string)
  """
  printtext = ''
  tree = ElementTree()
  try:
    tree.parse(inputlcsim)
  except Exception, x:
    # Parsing failures are returned as S_ERROR, DIRAC style.
    print "Found Exception %s %s" % (Exception, x)
    return S_ERROR("Found Exception %s %s" % (Exception, x))
  # NOTE(review): this chunk ends here -- 'printtext' and most parameters
  # are presumably used by code beyond this view; confirm in full source.
def parse_test_objects(category, feature_name, percentile, trial, paths, feature_paths): info_file = "/Users/isa/Experiments/bof_bmvc12/trial_" + str(trial) + "/bof_category_test_info.xml" info_tree = ElementTree(); info_tree.parse(info_file); scene_elms = info_tree.findall('scene'); print 'Found: ' + str(len(scene_elms)) + 'scenes' for scene in scene_elms: site_name = scene.get('site_name'); obj_elms = scene.findall('object'); if obj_elms is None: print "Invalid scene info file: No objects element" sys.exit(-1); print 'Found: ' + str(len(obj_elms)) + 'objects' for elm in obj_elms: class_name = elm.get('class_name'); if(class_name==category): mesh_name = elm.get('ply_name') ply_path = "/data/helicopter_providence_3_12/" + site_name + "/objects_with_aux/" + category + "_" + str(percentile) + "/" + mesh_name + ".ply"; feature_path = "/Users/isa/Experiments/shape_features_bmvc12/" + site_name + "/" + feature_name + "/" + category + "_" + str(percentile) + "/" + mesh_name + ".npy"; paths.append(ply_path); feature_paths.append(feature_path)
def insert_to_Summary(sumaryfile,total_case,pass_case,pass_rate,fail_case,fail_rate,block_case,block_rate): try: tree = ElementTree() tree.parse(sumaryfile) root = tree.getroot() ntotal_case = root.getiterator("total_case") ntotal_case[0].text = str(total_case) npass_case = root.getiterator("pass_case") npass_case[0].text = str(pass_case) npass_case_rate = root.getiterator("pass_rate") npass_case_rate[0].text = str(pass_rate) nfail_case = root.getiterator("fail_case") nfail_case[0].text = str(fail_case) nfail_case_rate = root.getiterator("fail_rate") nfail_case_rate[0].text = str(fail_rate) nblock_case = root.getiterator("block_case") nblock_case[0].text = str(block_case) nblock_case_rate = root.getiterator("block_rate") nblock_case_rate[0].text = str(block_rate) test_end_time = datetime.now().strftime('%m-%d-%H:%M:%S') ntest_start_time = root.getiterator("start_at") ntest_start_time[0].text = str(test_start_time) ntest_end_time = root.getiterator("end_at") ntest_end_time[0].text = str(test_end_time) device_id_get = root.getiterator("environment") device_id_get[0].set("device_id",Device_Ip) tree.write(sumaryfile) except Exception,e: print Exception,"Insert to report/summart.xml -------------------------> error:",e
def read_xml(in_path):
    # Read and parse an XML file.
    # in_path: path to the XML file
    # return: the populated ElementTree
    return ElementTree(file=in_path)
def read_xml(in_path):
    '''Read and parse an XML file.
    in_path: path to the XML file
    return: the parsed ElementTree'''
    parsed = ElementTree()
    with open(in_path, 'rb') as handle:
        parsed.parse(handle)
    return parsed
def __init__(self):
    """Build the launcher window: a 12x4 table of buttons, one per entry
    in launchers.xml, inside a 280x700 gtk window."""
    # Running gtk.Window's init method
    super(MainWindow, self).__init__()
    self.set_size_request(280, 700)
    # The window close button quits the app.
    self.connect("destroy", gtk.main_quit)
    # The table is the real gui, the window just holds it;
    # gizmos are added to the table, not the window.
    self.table = gtk.Table(12, 4, True)
    # ----------------------------------
    config = ElementTree()
    config.parse("launchers.xml")
    # XML insists on nesting everything a dozen times
    for row, entry in enumerate(config.find("launchers").getchildren()):
        button = gtk.Button()
        button.set_label(entry.find("name").text)
        self.table.attach(button, 0, 1, row, row + 1)
    # -----------------------------------
    # add the table to the window
    self.add(self.table)
    # if you don't show or show_all, no gui
    self.show_all()
def _getTVDBThumbnail(self):
    """Look up an episode thumbnail (or season-1 pilot poster) on thetvdb.com.

    Downloads and caches the series XML under Config['tmpdir'], refreshing
    the cache after Config['cacherenew'] days. On success sets
    self.thumbnail to the banner URL and returns True; otherwise False.
    """
    import os, time
    if not self.id:
        return False
    # Hoist the repeatedly rebuilt cache path into a local (it appeared
    # six times in the original body).
    cache_file = Config['tmpdir'] + self.id + '-' + Config['tvdblang'] + '.xml'
    # If the cached copy is older than 'cacherenew' days, delete it so it
    # gets downloaded again below.
    if os.path.isfile(cache_file):
        if os.path.getctime(cache_file) < time.time() - (Config['cacherenew'] * 86400):
            os.remove(cache_file)
    if not os.path.isfile(cache_file):
        URL('http://www.thetvdb.com/api/' + Config['tvdbapikey'] + '/series/'
            + self.id + '/all/' + Config['tvdblang'] + '.xml').download(cache_file)
    from xml.etree.ElementTree import ElementTree
    tree = ElementTree()
    try:
        tree.parse(cache_file)
        # Kept as '== True' on purpose: Config may hold non-bool values.
        if Config['posterforpilot'] == True and self.season == 1 and self.episode == 1:
            series = tree.find('Series')
            if series.find('poster').text:
                self.thumbnail = 'http://www.thetvdb.com/banners/' + series.find('poster').text
                return True
        for episode in tree.findall('Episode'):
            if (int(episode.find('SeasonNumber').text) == self.season
                    and int(episode.find('EpisodeNumber').text) == self.episode):
                if episode.find('filename').text:
                    self.thumbnail = 'http://www.thetvdb.com/banners/' + episode.find('filename').text
                    return True
    except Exception:
        # Malformed or partial cache file / unexpected XML layout: report
        # "no thumbnail" rather than crashing (was a bare except).
        pass
    return False
def import_opml(opml_file):
    """Import feeds from an OPML file into db.feeds (upsert by url).

    Outline elements without an xmlUrl are treated as tag/folder headers;
    the most recent header becomes the tag for the feeds that follow it.
    """
    tree = ElementTree()
    tree.parse(opml_file)
    outlines = tree.findall(".//outline")
    tag = None
    # TODO: fix this for all opml formats
    for o in outlines:
        xmlurl = o.attrib.get('xmlUrl')
        if xmlurl is None:
            # No feed URL: this outline is a category header. Keep the
            # previous tag if it has no text attribute either.
            tag = o.attrib.get('text', tag)
            continue
        try:
            f = {
                '_id': str(uuid.uuid1()),
                'title': o.attrib['text'],
                'url': xmlurl,
                'web_url': o.attrib['htmlUrl'],
                'tag': tag,
            }
            db.feeds.update({'url': f['url']}, f, True)
        except KeyError:
            # Outline missing text/htmlUrl attributes; skip it (the
            # original bare except also hid real database errors).
            pass
def parse_config(name):
    """Parse the config file *name* and return a list holding one
    processed entry per top-level element."""
    tree = ElementTree()
    tree.parse(name)
    return [process_element(child) for child in list(tree.getroot())]
def parse_scenes_info(scenes_info_file, model_dirs, ply_paths): print 'Parsing: ' + scenes_info_file #parse xml file bof_tree = ElementTree(); bof_tree.parse(scenes_info_file); scenes_elm = bof_tree.getroot().findall('scene'); if scenes_elm is None: print "Invalid bof info file: No scenes element" sys.exit(-1); #find scene paths for s in range(0, len(scenes_elm)): path = scenes_elm[s].get("output_dir"); ply_file = scenes_elm[s].get("ply_path"); if path is None: print "Invalid info file: Error parsing scene path" sys.exit(-1); if ply_file is None: print "Invalid info file: Error parsing ply_file" sys.exit(-1); model_dirs.append(path); ply_paths.append(ply_file);
def updatetrafficlights(self):
    """Write the current traffic-light states into the signal-data XML.

    Matches each light/link state from getTrafficLightValues() against the
    <trafficlight> elements by intersection id and link index, updates
    their 'state' attribute, and saves the file in place.
    """
    tree = ElementTree()
    tree.parse(self.trafficsignaldataxml)
    # The element list never changes while we only update attributes, so
    # look it up once instead of once per link (it was re-queried inside
    # two nested loops).
    items = tree.findall('trafficlight')
    # Get traffic light states
    # format: ['202305458', 'grygrygrygrygrygry'], ['202305472', 'gGGGgGGGGgGGGgGGGG']]}
    trafficlights = self.getTrafficLightValues()
    for trafficlight in trafficlights:
        trafficlightID = trafficlight[0]
        linklights = list(trafficlight[1])
        for link_index in range(len(linklights)):
            for item in items:
                # Remove letter g from links
                purestr = str(item.attrib['link_index']).replace("g", "")
                link_indexes = purestr.split("!")
                # if the traffic light id matches
                if str(item.attrib['intersection_id']) == trafficlightID:
                    # if link index is in the link_indexes
                    if str(link_index) in link_indexes:
                        item.attrib['state'] = getTrafficLightState(linklights[link_index])
    tree.write(self.trafficsignaldataxml)
def print_error_reports_from_report_file(file_path): tree = ElementTree() try: tree.parse(file_path) except: print "-" * 50 print "Error parsing {0!s}".format(file_path) f = open(file_path, "r"); print f.read(); print "-" * 50 return testcases = tree.findall(".//testcase") for testcase in testcases: error = testcase.find("error") if error is not None: print_detail_information(testcase, error) fail = testcase.find("fail") if fail is not None: print_detail_information(testcase, fail) failure = testcase.find("failure") if failure is not None: print_detail_information(testcase, failure)
def ParseReaderFile(fname):
    """Parse a plate-reader XML export into nested measurement dicts.

    Returns plate_values[reading_label][time_in_sec][(row, col)] = float
    reading, or None when the instrument reported "OVER" (saturated).

    NOTE(review): relies on a module-level 'fmt' format string for
    time.strptime -- confirm it matches the 'Time_Start' attribute layout.
    The parser is order-dependent: Section must precede its Well elements,
    and Well must precede its Multiple/Single readings.
    """
    xml_reader = ElementTree()
    xml_reader.parse(fname)
    reading_label = None
    time_in_sec = None
    well = (0, 0)
    measurement = None
    plate_values = {}
    for e in xml_reader.getiterator():
        if e.tag == 'Section':
            # A Section starts a new reading: label plus start timestamp.
            reading_label = e.attrib['Name']
            TIME = e.attrib['Time_Start']
            # Keep only the first 19 characters of the timestamp
            # (presumably drops a sub-second/zone suffix -- TODO confirm).
            TIME = TIME[:19]
            TS = time.strptime(TIME, fmt)
            time_in_sec = calendar.timegm(TS)
            plate_values[reading_label] = {}
            plate_values[reading_label][time_in_sec] = {}
        elif e.tag == 'Well':
            # Positions like 'B7' become zero-based (row, col) tuples.
            W = e.attrib['Pos']
            well_row = ord(W[0]) - ord('A')
            well_col = int(W[1:]) - 1
            well = (well_row, well_col)
        elif e.tag == 'Multiple':
            # Multi-read wells: keep only the instrument-reported mean.
            if e.attrib['MRW_Position'] == 'Mean':
                measurement = e.text
                plate_values[reading_label][time_in_sec][well] = float(measurement)
        elif e.tag == 'Single':
            measurement = e.text
            if measurement == "OVER":
                # Saturated reading: store None rather than a number.
                plate_values[reading_label][time_in_sec][well] = None
            else:
                plate_values[reading_label][time_in_sec][well] = float(measurement)
    return plate_values
def LoadXml(self, xmlFilename = None, anrpBaseDirStr = None):
    """Load a '<name>.anrp.rpd.xml' resource-package definition.

    Fills in self.packageName, self.type and self.resSourceList from the
    file. The base filename defaults to self.packageName; when
    anrpBaseDirStr is given, both the XML path and every resource path
    are resolved relative to it.
    """
    # Build the absolute path of the XML file.
    if xmlFilename is None:
        xmlFilename = self.packageName
    xmlFilename = '%s.anrp.rpd.xml' % xmlFilename
    if anrpBaseDirStr is not None:
        xmlFilename = os.path.join(anrpBaseDirStr, xmlFilename)
    tree = ElementTree()
    tree.parse(xmlFilename)
    root = tree.getroot()
    self.packageName = root.find('packageName').text
    self.type = root.find('type').text
    self.resSourceList = []
    for source in root.findall('resSourceList'):
        res_path = source.get('resClass')
        if anrpBaseDirStr is not None:
            res_path = os.path.join(anrpBaseDirStr, res_path)
        self.resSourceList.append({
            'resPath': res_path,
            'resName': source.get('resId'),
        })
    return
def getWorkflow(styleneRUNS, setName):
    '''Retrieve workflow id for train and test.

    The runs file alternates TRAINING/TEST <run> entries, e.g.:
    <run>
      <type>TRAINING</type> <run-number>1</run-number>
      <set-name>fold-0</set-name>
      <workflow-map>stylenerun/data//Workflow_..._TRAINING_1_fold-0</workflow-map>
    </run>
    <run>
      <type>TEST</type> <run-number>1</run-number>
      <set-name>fold-0</set-name>
      <workflow-map>stylenerun/data//Workflow_..._TEST_1_fold-0</workflow-map>
    </run>

    Returns (training workflow-map, test workflow-map) for the first pair,
    after asserting both belong to *setName*.
    '''
    tree = ElementTree()
    tree.parse(styleneRUNS)
    runs = tree.findall('run')
    # Only the first TRAINING/TEST pair is used. Direct indexing replaces
    # the original zip(runs[::2], runs[1::2])[0], which breaks on
    # Python 3 where zip() returns a non-subscriptable iterator.
    trainflow, testflow = runs[0], runs[1]
    assert testflow.find('type').text.lower() in ['test', 'testing']
    assert trainflow.find('set-name').text == testflow.find('set-name').text == setName
    return (trainflow.find('workflow-map').text,
            testflow.find('workflow-map').text)
def main(): stop_objs = {} for service in SERVICE_MNEMONICS.split(','): print "INSERT INTO service (mnemonics) VALUES ('%s');" % service tree = ElementTree() tree.parse('data/%s.xml' % service) markers = tree.find('markers') stops = markers.getiterator('busStop') for stop in stops: sms = stop.find('sms').text name = stop.find('nom').text x = stop.find('x').text y = stop.find('y').text stop_obj = stop_objs.get(sms, None) if stop_obj is None: stop_obj = BusStop(sms, name, x, y) stop_objs[sms] = stop_obj stop_obj.routes.append(service) for stop in stop_objs.values(): print stop.sql()
def _render(self, parent):
    """Render the generated QR code into *parent*: a white padded
    background rect plus one svg path per module."""
    qrcode = self._generate()
    if not qrcode:
        return
    # Serialize the QR code and parse it back as an SVG element tree.
    buf = StringIO()
    qrcode.save(buf)
    buf.seek(0)
    tree = ElementTree()
    tree.parse(buf)
    root = tree.getroot()
    # Grow the viewBox by the configured padding on every side. The two
    # expressions are kept exactly as before to preserve integer-division
    # behaviour on whole-number options.
    pad = self.options.padding * self.options.size / 10
    double_pad = 2 * self.options.padding * self.options.size / 10
    vbox = map(int, root.get("viewBox").split())
    vbox = (vbox[0] - pad, vbox[1] - pad,
            vbox[2] + double_pad, vbox[3] + double_pad)
    vbox = map(str, vbox)
    rect = inkex.etree.SubElement(
        parent, inkex.addNS('rect', 'svg'),
        {"x": vbox[0], "y": vbox[1],
         "width": vbox[2], "height": vbox[3],
         "style": "fill:#fff;"})
    # Copy every child of the SVG root into the workspace as a path.
    for m in root.getchildren():
        attribs = {}
        for k in m.keys():
            attribs[k] = str(m.get(k))
        inkex.etree.SubElement(parent, inkex.addNS('path', 'svg'), attribs)
def on_packagelist_cursor_changed(self, widget, data=None):
    """GTK handler: when the package-list selection changes, rebuild
    self.res with one (name, path, preview pixbuf) row per <item> in the
    selected package's resource XML."""
    (tree, cursor) = self.pkgview.get_selection().get_selected()
    # Column 1 of the package list holds the resource reference;
    # resolve_res() maps it to a dict of concrete files (project helper).
    resources_file = resolve_res(tree.get_value(cursor, 1))
    resources = ElementTree()
    resources.parse(resources_file['xml'])
    self.res.clear()
    for resource in resources.iterfind('item'):
        res_file = resolve_res(resource.text)
        # Any drawable-specifier key present in the resolved file dict?
        res_in = [s for s in drawable_specifiers if s in res_file]
        if 'png' in res_file:
            # Direct bitmap resource.
            res_img = gtk.gdk.pixbuf_new_from_file(res_file['png'])
        elif res_in:
            # Use the first matching drawable variant.
            res_img = gtk.gdk.pixbuf_new_from_file(res_file[res_in[0]])
        elif 'xml' in res_file:
            # trace into this file: use the last referenced drawable found,
            # falling back to a 1x1 placeholder when none is referenced.
            deep_res = ElementTree()
            deep_res.parse(res_file['xml'])
            res_img = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB, True, 8, 1, 1)
            for item in deep_res.iterfind('item'):
                if nsify('drawable') in item.attrib:
                    res_img = gtk.gdk.pixbuf_new_from_file(resolve_res(item.attrib[nsify('drawable')])['png'])
        else:
            # Unknown resource type: 1x1 placeholder image.
            res_img = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB, True, 8, 1, 1)
        self.res.append([
            resource.attrib['name'],
            resource.text,
            res_img,
        ])
def parse_taylor_info(taylor_info_file, scene_blocks): print 'Parsing: ' + taylor_info_file #parse xml file taylor_tree = ElementTree(); taylor_tree.parse(taylor_info_file); #find scene paths scenes_elm = taylor_tree.getroot().findall('scene'); if scenes_elm is None: print "Invalid taylor info file: No scenes element" sys.exit(-1); for s in range(0, len(scenes_elm)): scene_name = scenes_elm[s].get("path") if scene_name is None: print "Invalid taylor info file: Error parsing scenes" sys.exit(-1); blocks = []; blocks.append(s); parse_scene(scene_name, blocks); scene_blocks.append(blocks);
def main():
    """Entry point: convert a Mitsuba XML scene into an appleseed project
    file."""
    parser = argparse.ArgumentParser(
        description="convert Mitsuba scenes to appleseed format.")
    parser.add_argument("input_file", metavar="input-file",
                        help="Mitsuba scene (*.xml)")
    parser.add_argument("output_file", metavar="output-file",
                        help="appleseed scene (*.appleseed)")
    args = parser.parse_args()
    # Route appleseed log messages to stderr. The global logger does NOT
    # take ownership of the target, so a live reference must be kept here
    # (and removed when no longer needed).
    log_target = asr.ConsoleLogTarget(sys.stderr)
    asr.global_logger().add_target(log_target)
    asr.global_logger().set_verbosity_level(asr.LogMessageCategory.Warning)
    tree = ElementTree()
    try:
        tree.parse(args.input_file)
    except IOError:
        fatal("Failed to load {0}".format(args.input_file))
    project = convert(tree)
    asr.ProjectFileWriter().write(
        project, args.output_file,
        asr.ProjectFileWriterOptions.OmitHandlingAssetFiles)
# NOTE(review): this fragment begins mid-if -- the matching 'if' branch
# (which sets machines_dir) lies outside this chunk.
else:
    machines_dir = None

# Locate config_compilers.xml when a machines directory is available.
if machines_dir is None:
    compiler_xml = None
else:
    compiler_xml = os.path.join(machines_dir, "config_compilers.xml")
    assert file_exists(compiler_xml), "Machines directory should be "+ \
        machines_dir+" but no config_compilers.xml is there!"

# Get test specification directories from command line options.
suite_specs = []
if options.xml_test_list is not None:
    test_xml_tree = ElementTree()
    test_xml_tree.parse(options.xml_test_list)
    # Paths inside the test list may be given relative to "here".
    known_paths = {
        "here": os.path.abspath(os.path.dirname(options.xml_test_list)),
    }
    suite_specs.extend(suites_from_xml(test_xml_tree, known_paths))
if options.test_spec_dir is not None:
    # A directory given directly on the command line becomes its own suite.
    suite_specs.append(
        TestSuiteSpec("__command_line_test__",
                      ["__command_line_test__"],
                      [os.path.abspath(options.test_spec_dir)]))

# Create build directory if necessary.
build_dir = os.path.abspath(options.build_dir)
if not file_exists(build_dir):
    os.mkdir(build_dir)
def _main():
    """Unit-test driver (visible portion): parse command-line options,
    collect test-suite specs, set up the machines object, and create and
    enter the build directory.

    NOTE(review): the function body continues past the end of this chunk.
    """
    output, build_dir, build_optimized, clean,\
        cmake_args, compiler, enable_genf90, machine, machines_dir,\
        make_j, use_mpi, mpilib, mpirun_command, test_spec_dir, ctest_args,\
        use_openmp, xml_test_list, verbose \
        = parse_command_line(sys.argv)
    #=================================================
    # Find directory and file paths.
    #=================================================
    suite_specs = []
    # TODO: this violates cime policy of direct access to xml
    # should be moved to CIME/XML
    if xml_test_list is not None:
        test_xml_tree = ElementTree()
        test_xml_tree.parse(xml_test_list)
        # Paths inside the test list may be given relative to "here".
        known_paths = {
            "here": os.path.abspath(os.path.dirname(xml_test_list)),
        }
        suite_specs.extend(suites_from_xml(test_xml_tree, known_paths))
    if test_spec_dir is not None:
        # A directory given directly on the command line becomes its own suite.
        suite_specs.append(
            TestSuiteSpec("__command_line_test__",
                          ["__command_line_test__"],
                          [os.path.abspath(test_spec_dir)]))
    if machines_dir is not None:
        machines_file = os.path.join(machines_dir, "config_machines.xml")
        machobj = Machines(infile=machines_file, machine=machine)
    else:
        machobj = Machines(machine=machine)
    # Create build directory if necessary.
    build_dir = os.path.abspath(build_dir)
    if not os.path.isdir(build_dir):
        os.mkdir(build_dir)
    # Switch to the build directory.
    os.chdir(build_dir)
    #=================================================
    # Functions to perform various stages of build.
#================================================= if not use_mpi: mpilib = "mpi-serial" elif mpilib is None: mpilib = machobj.get_default_MPIlib() logger.info("Using mpilib: {}".format(mpilib)) if compiler is None: compiler = machobj.get_default_compiler() logger.info("Compiler is {}".format(compiler)) compilerobj = Compilers(machobj, compiler=compiler, mpilib=mpilib) pfunit_path = find_pfunit(compilerobj, mpilib=mpilib, use_openmp=use_openmp) debug = not build_optimized os_ = machobj.get_value("OS") # Create the environment, and the Macros.cmake file # # configure(machobj, build_dir, ["CMake"], compiler, mpilib, debug, os_, unit_testing=True) machspecific = EnvMachSpecific(build_dir, unit_testing=True) fake_case = FakeCase(compiler, mpilib, debug) machspecific.load_env(fake_case) os.environ["OS"] = os_ os.environ["COMPILER"] = compiler os.environ["DEBUG"] = stringify_bool(debug) os.environ["MPILIB"] = mpilib if use_openmp: os.environ["compile_threaded"] = "true" else: os.environ["compile_threaded"] = "false" os.environ["UNIT_TEST_HOST"] = socket.gethostname() if "NETCDF_PATH" in os.environ and not "NETCDF" in os.environ: # The CMake Netcdf find utility that we use (from pio2) seems to key off # of the environment variable NETCDF, but not NETCDF_PATH logger.info("Setting NETCDF environment variable: {}".format( os.environ["NETCDF_PATH"])) os.environ["NETCDF"] = os.environ["NETCDF_PATH"] if not use_mpi: mpirun_command = "" elif mpirun_command is None: mpi_attribs = { "compiler": compiler, "mpilib": mpilib, "threaded": use_openmp, "unit_testing": True } # We can get away with specifying case=None since we're using exe_only=True mpirun_command, _ = machspecific.get_mpirun(case=None, attribs=mpi_attribs, exe_only=True) mpirun_command = machspecific.get_resolved_value(mpirun_command) logger.info("mpirun command is '{}'".format(mpirun_command)) #================================================= # Run tests. 
#================================================= for spec in suite_specs: os.chdir(build_dir) if os.path.isdir(spec.name): if clean: rmtree(spec.name) if not os.path.isdir(spec.name): os.mkdir(spec.name) for label, directory in spec: os.chdir(os.path.join(build_dir, spec.name)) if not os.path.isdir(label): os.mkdir(label) os.chdir(label) name = spec.name + "/" + label if not os.path.islink("Macros.cmake"): os.symlink(os.path.join(build_dir, "Macros.cmake"), "Macros.cmake") use_mpiserial = not use_mpi cmake_stage(name, directory, build_optimized, use_mpiserial, mpirun_command, output, pfunit_path, verbose=verbose, enable_genf90=enable_genf90, cmake_args=cmake_args) make_stage(name, output, make_j, clean=clean, verbose=verbose) for spec in suite_specs: os.chdir(os.path.join(build_dir, spec.name)) for label, directory in spec: name = spec.name + "/" + label output.print_header("Running CTest tests for " + name + ".") ctest_command = ["ctest", "--output-on-failure"] if verbose: ctest_command.append("-VV") if ctest_args is not None: ctest_command.extend(ctest_args.split(" ")) run_cmd_no_fail(" ".join(ctest_command), from_dir=label, arg_stdout=None, arg_stderr=subprocess.STDOUT)
from xml.etree.ElementTree import ElementTree

tree = ElementTree()
# parse the XML file - test.xml (parse() also returns the root element)
root = tree.parse("test.xml")

# iterate over items to find specific product number - 12345678
# NOTE(review): 'type' shadows the builtin of the same name, and the else
# branch prints "not found" once per non-matching item rather than once
# overall -- confirm whether that per-item output is intended.
for node in root.findall('./Items'):
    for type in list(node):
        if type.find('ProductNo').text == "12345678":
            print(type.find('MD5').text)
            print('found')
        else:
            print("not found")
def get_hiseq_container(run_dir):
    """Return the flowcell id recorded in the run's RunInfo.xml (HiSeq)."""
    run_info = ElementTree()
    run_info.parse(os.path.join(run_dir, "RunInfo.xml"))
    flowcell = run_info.find("Run/Flowcell")
    return flowcell.text
from xml.etree.ElementTree import ElementTree as ET
from svg_util.etree import find, findall
from svg_util.css import css_dict, css_string, set_property, get_property
from copy import deepcopy

# Script: duplicate the <rect> inside the first <g> of a test SVG, placing
# the copy immediately to the right of the original, then write the result.
tree = ET()
tree.parse("./tests/addabove/A.svg")
root = tree.getroot()
g = find(root, "g")
rect = find(root, "g/rect")
rect2 = deepcopy(rect)
# Give the copy a distinct id ("<orig>2") and shift x by the rect's width
# so the two rectangles sit side by side.
rect2.attrib['id'] = rect.attrib['id'] + "2"
rect2.attrib['x'] = str(float(rect.attrib['x']) + float(rect.attrib['width']))
g.append(rect2)
tree.write('bla3.svg')
def parseXML(trackletFile):
    """
    Parses tracklet xml file and convert results to list of Tracklet objects

    :param trackletFile: name of a tracklet xml file
    :returns: list of Tracklet objects read from xml file
    """
    # convert tracklet XML data to a tree structure
    eTree = ElementTree()
    print('Parsing tracklet file', trackletFile)
    with open(trackletFile) as f:
        eTree.parse(f)

    # now convert output to list of Tracklet objects
    trackletsElem = eTree.find('tracklets')
    tracklets = []
    trackletIdx = 0
    nTracklets = None
    for trackletElem in trackletsElem:
        if trackletElem.tag == 'count':
            nTracklets = int(trackletElem.text)
            print('File contains', nTracklets, 'tracklets')
        elif trackletElem.tag == 'item_version':
            pass
        elif trackletElem.tag == 'item':
            # a tracklet
            newTrack = Tracklet()
            isFinished = False
            hasAmt = False
            frameIdx = None
            for info in trackletElem:
                if isFinished:
                    raise ValueError('more info on element after finished!')
                if info.tag == 'objectType':
                    newTrack.objectType = info.text
                elif info.tag == 'h':
                    newTrack.size[0] = float(info.text)
                elif info.tag == 'w':
                    newTrack.size[1] = float(info.text)
                elif info.tag == 'l':
                    newTrack.size[2] = float(info.text)
                elif info.tag == 'first_frame':
                    newTrack.firstFrame = int(info.text)
                elif info.tag == 'poses':
                    # this info is the possibly long list of poses
                    for pose in info:
                        if pose.tag == 'count':
                            # this should come before the others; it sizes all
                            # the per-frame arrays, NaN-filled until parsed
                            if newTrack.nFrames is not None:
                                raise ValueError('there are several pose lists for a single track!')
                            elif frameIdx is not None:
                                raise ValueError('?!')
                            newTrack.nFrames = int(pose.text)
                            newTrack.trans = np.nan * np.ones((newTrack.nFrames, 3), dtype=float)
                            newTrack.rots = np.nan * np.ones((newTrack.nFrames, 3), dtype=float)
                            newTrack.states = np.nan * np.ones(newTrack.nFrames, dtype='uint8')
                            newTrack.occs = np.nan * np.ones((newTrack.nFrames, 2), dtype='uint8')
                            newTrack.truncs = np.nan * np.ones(newTrack.nFrames, dtype='uint8')
                            newTrack.amtOccs = np.nan * np.ones((newTrack.nFrames, 2), dtype=float)
                            newTrack.amtBorders = np.nan * np.ones((newTrack.nFrames, 3), dtype=float)
                            frameIdx = 0
                        elif pose.tag == 'item_version':
                            pass
                        elif pose.tag == 'item':
                            # pose in one frame
                            if frameIdx is None:
                                raise ValueError('pose item came before number of poses!')
                            for poseInfo in pose:
                                if poseInfo.tag == 'tx':
                                    newTrack.trans[frameIdx, 0] = float(poseInfo.text)
                                elif poseInfo.tag == 'ty':
                                    newTrack.trans[frameIdx, 1] = float(poseInfo.text)
                                elif poseInfo.tag == 'tz':
                                    newTrack.trans[frameIdx, 2] = float(poseInfo.text)
                                elif poseInfo.tag == 'rx':
                                    newTrack.rots[frameIdx, 0] = float(poseInfo.text)
                                elif poseInfo.tag == 'ry':
                                    newTrack.rots[frameIdx, 1] = float(poseInfo.text)
                                elif poseInfo.tag == 'rz':
                                    newTrack.rots[frameIdx, 2] = float(poseInfo.text)
                                elif poseInfo.tag == 'state':
                                    newTrack.states[frameIdx] = stateFromText[poseInfo.text]
                                elif poseInfo.tag == 'occlusion':
                                    newTrack.occs[frameIdx, 0] = occFromText[poseInfo.text]
                                elif poseInfo.tag == 'occlusion_kf':
                                    newTrack.occs[frameIdx, 1] = occFromText[poseInfo.text]
                                elif poseInfo.tag == 'truncation':
                                    newTrack.truncs[frameIdx] = truncFromText[poseInfo.text]
                                elif poseInfo.tag == 'amt_occlusion':
                                    newTrack.amtOccs[frameIdx, 0] = float(poseInfo.text)
                                    hasAmt = True
                                elif poseInfo.tag == 'amt_occlusion_kf':
                                    newTrack.amtOccs[frameIdx, 1] = float(poseInfo.text)
                                    hasAmt = True
                                elif poseInfo.tag == 'amt_border_l':
                                    newTrack.amtBorders[frameIdx, 0] = float(poseInfo.text)
                                    hasAmt = True
                                elif poseInfo.tag == 'amt_border_r':
                                    newTrack.amtBorders[frameIdx, 1] = float(poseInfo.text)
                                    hasAmt = True
                                elif poseInfo.tag == 'amt_border_kf':
                                    newTrack.amtBorders[frameIdx, 2] = float(poseInfo.text)
                                    hasAmt = True
                                else:
                                    raise ValueError('unexpected tag in poses item: {0}!'.format(poseInfo.tag))
                            frameIdx += 1
                        else:
                            raise ValueError('unexpected pose info: {0}!'.format(pose.tag))
                elif info.tag == 'finished':
                    isFinished = True
                else:
                    raise ValueError('unexpected tag in tracklets: {0}!'.format(info.tag))
            # end: for all fields in current tracklet

            # some final consistency checks on new tracklet
            if not isFinished:
                warn('tracklet {0} was not finished!'.format(trackletIdx))
            if newTrack.nFrames is None:
                warn('tracklet {0} contains no information!'.format(trackletIdx))
            elif frameIdx != newTrack.nFrames:
                # BUGFIX: the format string used {1} twice, so the actual
                # parsed frame count (third argument) was never reported.
                warn('tracklet {0} is supposed to have {1} frames, but parser found {2}!'.format(
                    trackletIdx, newTrack.nFrames, frameIdx))
            if np.abs(newTrack.rots[:, :2]).sum() > 1e-16:
                warn('track contains rotation other than yaw!')

            # if amtOccs / amtBorders are not set, set them to None
            if not hasAmt:
                newTrack.amtOccs = None
                newTrack.amtBorders = None

            # add new tracklet to list
            tracklets.append(newTrack)
            trackletIdx += 1
        else:
            raise ValueError('unexpected tracklet info')
    # end: for tracklet list items

    print('Loaded', trackletIdx, 'tracklets.')

    # final consistency check
    if trackletIdx != nTracklets:
        warn('according to xml information the file has {0} tracklets, but parser found {1}!'.format(
            nTracklets, trackletIdx))
    return tracklets
def _load_jmdict(fname):
    """Parse an xz-compressed JMdict XML file.

    :param fname: path to the .xz file
    :returns: (entries, mapping) where entries is a list of Entry objects and
              mapping maps each kanji/reading string to its entries.
    """
    with lzma.open(fname, 'rt', encoding='utf-8') as f:
        tree = ElementTree()
        tree.parse(f)

    def parse_entry(entry):
        # <k_ele>: kanji writings with info codes and priority tags.
        kanji = [
            Variant(
                ele.find('keb').text,
                [EntryType(inf.text) for inf in ele.findall('ke_inf')],
                [pri.text for pri in ele.findall('ke_pri')])
            for ele in entry.findall('k_ele')
        ]
        # FIXME ignoring re_restr for now
        # BUGFIX: presence of <re_nokanji/> must be tested with `is not None`;
        # an ElementTree Element with no children is falsy, so the old
        # truthiness test silently dropped the nokanji flag.
        readings = [
            Variant(
                ele.find('reb').text,
                [EntryType(inf.text) for inf in ele.findall('re_inf')] +
                ([EntryType('nokanji')] if ele.find('re_nokanji') is not None else []),
                [pri.text for pri in ele.findall('re_pri')])
            for ele in entry.findall('r_ele')
        ]
        # NOTE: We are ignoring any <etym>, <bibl> or <audit> elements. The former two are not in use as of the time
        # this code is written (September 2014) and the latter one does not seem interesting at the moment. - jaseg
        links = [
            Link(
                link.find('link_tag').text, link.find('link_desc').text,
                link.find('link_uri').text) for info in entry.findall('info')
            for link in info.findall('links')
        ]
        # NOTE: For now, we are ignoring the g_gend attribute as well as the <pri> and <example> elements since
        # these are not used at the time this code is written (September 2014). - jaseg
        # <!ELEMENT sense (s_inf*, lsource*, gloss*)>
        sense_elems = entry.findall('sense')
        translations = []
        for sense in sense_elems:
            stagk = [stagk.text for stagk in sense.findall('stagk')]
            stagr = [stagr.text for stagr in sense.findall('stagr')]
            pos = [EntryType(pos.text) for pos in sense.findall('pos')]
            xref = [xref.text for xref in sense.findall('xref')]
            ant = [ant.text for ant in sense.findall('ant')]
            field = [field.text for field in sense.findall('field')]
            misc = [EntryType(misc.text) for misc in sense.findall('misc')]
            dial = [EntryType(dial.text) for dial in sense.findall('dial')]
            sinf = [inf.text for inf in sense.findall('s_inf')]
            # Group glosses by language; entries without an explicit language
            # attribute default to English.
            gloss_dict = groupdict(
                (gloss.get(LANG_ATTR, 'eng'), gloss.text)
                for gloss in sense.findall('gloss'))
            gloss = gloss_dict['eng']
            translations.append(
                Translation(gloss, gloss_dict, stagk, stagr, pos, xref, ant,
                            field, misc, dial, sinf))

        return Entry(kanji, readings, translations, links)

    entries = [
        parse_entry(entry) for entry in tree.getroot().findall('entry')
    ]
    # Index every entry under each of its writings and readings.
    mapping = groupdict((key.moji, entry) for entry in entries
                        for key in entry.kanji + entry.readings)
    return entries, mapping
def boxm2CreateScene(scene_info, boxm2_dir, app1='boxm2_mog3_grey',
                     app2='boxm2_num_obs', app3='boxm2_sum_log_msg_pos'):
    # Build a boxm2 scene description from a scene-info XML file (bounding
    # box + resolution + prior probability) and write it into boxm2_dir.
    # NOTE(review): Python 2 print statements — this module targets Python 2.
    if not os.path.isdir(boxm2_dir + '/'):
        os.mkdir(boxm2_dir + '/')
    if not os.path.isdir(boxm2_dir + '/'):
        # mkdir failed (e.g. bad path) — bail out.
        print "Invalid scene xml"
        sys.exit(-1)

    print 'Parsing: '
    print scene_info
    print boxm2_dir

    #parse xml file
    tree = ElementTree()
    tree.parse(scene_info)

    #find scene dimensions
    bbox_elm = tree.getroot().find('bbox')
    if bbox_elm is None:
        print "Invalid scene info file: No bbox"
        sys.exit(-1)
    minx = float(bbox_elm.get('minx'))
    miny = float(bbox_elm.get('miny'))
    minz = float(bbox_elm.get('minz'))
    maxx = float(bbox_elm.get('maxx'))
    maxy = float(bbox_elm.get('maxy'))
    maxz = float(bbox_elm.get('maxz'))

    #find scene resolution (finest octree cell size)
    res_elm = tree.getroot().find('min_octree_cell_length')
    if res_elm is None:
        print "Invalid info file: No min_octree_cell_length"
        sys.exit(-1)
    resolution = float(res_elm.get('val'))
    print("Resolution: " + str(resolution))

    res_elm = tree.getroot().find('prior_probability')
    if res_elm is None:
        print "Invalid info file: No prior_probability"
        sys.exit(-1)
    prior_probability = float(res_elm.get('val'))

    #PARAMETERS: fixed block layout (trees per block per axis, octree depth)
    ntrees_x = 32  #was 32
    ntrees_y = 32  #was 32
    ntrees_z = 32  #was 32
    max_num_lvls = 4

    min_pt = [minx, miny, minz]
    max_pt = [maxx, maxy, maxz]
    # Delegate actual scene-file generation to the project helper.
    writeSceneFromBox(boxm2_dir, resolution, min_pt, max_pt, ntrees_x,
                      ntrees_y, ntrees_z, max_num_lvls, app1, app2, app3,
                      prior_probability)
def load_file(path: str) -> ET:
    """Parse the XML document at *path* and return the populated tree."""
    parsed = ET()
    parsed.parse(path)
    return parsed
def getfavorites(addon_id=None, section=None, mode='section'):
    # Read the favorites XML (module-level `xmlfile`) and return a list of
    # (title, url, addon_id, section_id, index) tuples, filtered by mode:
    #   'desktop' — all media, addon ids with '.' rewritten to '/'
    #   'addon'   — only media belonging to addon_id
    #   'section' — media grouped by section (returns after first addon --
    #               NOTE(review): early return looks suspicious; confirm intent)
    # NOTE(review): Python 2 print statements; `debug` is hard-wired True so
    # the else (error) branch is currently unreachable.
    list1 = []
    debug = True
    print "addon_id,section", addon_id, section
    if debug:
        tree = ElementTree()
        tree.parse(xmlfile)
        root = tree.getroot()
        i = 0
        for addon in root.iter('addon'):
            try:
                id = str(addon.get('id'))
            except:
                id = None
            try:
                section_id = str(addon.get('section'))
            except:
                section_id = None
            # Normalize playlist sections to their base section name.
            section_id = section_id.replace('_playlist', '')
            print "addon_id,section,section_id", addon_id, section, section_id
            if mode == 'desktop':
                # Desktop mode uses path-style addon ids.
                try:
                    id = id.replace(".", "/")
                except:
                    continue
                for media in addon.iter('media'):
                    title = str(media.attrib.get('title'))
                    url = str(media.text)
                    print '75id,section_id', id, section_id
                    list1.append((title, url, id, section_id, i))
                    i = i + 1
            elif mode == 'addon':
                if id == addon_id:
                    for media in addon.iter('media'):
                        title = str(media.attrib.get('title'))
                        url = str(media.text)
                        print '75id,section_id', id, section_id
                        list1.append((title, url, id, section_id, i))
                        i = i + 1
            elif mode == 'section':
                for media in addon.iter('media'):
                    title = str(media.attrib.get('title'))
                    url = str(media.text)
                    print '94id,section_id', id, section_id
                    list1.append((title, url, id, section_id, i))
                    i = i + 1
                print 'list1', list1
                return list1
    else:
        # Fallback entry shown in the UI when the favorites file is unreadable.
        list1.append(('Error reading favorites,repair favorites from Tools',
                      '', '', '', 1))
        print 'error in reading favorite xml file'
        return list1
    return
def parse_wp_xml(file):
    # Parse a WordPress WXR export file into a dict with a 'header'
    # (blog title/link/description) and 'items' (posts/pages with their
    # taxonomies, body HTML, and any <img> sources found in the body).
    # NOTE(review): Python 2 code (`unicode`, print statements). Depends on
    # module-level taxonomy_filter / taxonomy_entry_filter / BeautifulSoup.
    # XML namespace prefixes used by WXR, mapped to Clark-notation URIs.
    ns = {
        '': '',  #this is the default namespace
        'excerpt': "{http://wordpress.org/export/1.1/excerpt/}",
        'content': "{http://purl.org/rss/1.0/modules/content/}",
        'wfw': "{http://wellformedweb.org/CommentAPI/}",
        'dc': "{http://purl.org/dc/elements/1.1/}",
        'wp': "{http://wordpress.org/export/1.1/}"
    }
    tree = ElementTree()
    # NOTE(review): `wpe` is not defined in this function — presumably a
    # module-level path; looks like it should be `file`. Confirm.
    print "reading: " + wpe
    root = tree.parse(file)
    c = root.find('channel')

    def parse_header():
        # Blog-level metadata from the <channel> element.
        return {
            "title": unicode(c.find('title').text),
            "link": unicode(c.find('link').text),
            "description": unicode(c.find('description').text)
        }

    def parse_items():
        export_items = []
        xml_items = c.findall('item')
        for i in xml_items:
            # Collect taxonomies (<category domain=...>), skipping filtered
            # domains and filtered domain/entry pairs.
            taxanomies = i.findall('category')
            export_taxanomies = {}
            for tax in taxanomies:
                if not "domain" in tax.attrib:
                    continue
                t_domain = unicode(tax.attrib['domain'])
                t_entry = unicode(tax.text)
                if not (t_domain in taxonomy_filter) and not (
                        t_domain in taxonomy_entry_filter and
                        taxonomy_entry_filter[t_domain] == t_entry):
                    if not t_domain in export_taxanomies:
                        export_taxanomies[t_domain] = []
                    export_taxanomies[t_domain].append(t_entry)

            def gi(q, unicode_wrap=True):
                # Get item child text by qualified name, e.g. 'wp:post_date'.
                namespace = ''
                tag = ''
                if q.find(':') > 0:
                    namespace, tag = q.split(':', 1)
                else:
                    tag = q
                result = i.find(ns[namespace] + tag).text
                if unicode_wrap:
                    result = unicode(result)
                return result

            body = gi('content:encoded')
            # Harvest all <img src=...> URLs from the post body.
            img_srcs = []
            if body is not None:
                try:
                    soup = BeautifulSoup(body)
                    img_tags = soup.findAll('img')
                    for img in img_tags:
                        img_srcs.append(img['src'])
                except:
                    print "could not parse html: " + body
            #print img_srcs
            export_item = {
                'title': gi('title'),
                'author': gi('dc:creator'),
                'date': gi('wp:post_date'),
                'slug': gi('wp:post_name'),
                'status': gi('wp:status'),
                'type': gi('wp:post_type'),
                'wp_id': gi('wp:post_id'),
                'taxanomies': export_taxanomies,
                'body': body,
                'img_srcs': img_srcs
            }
            export_items.append(export_item)
        return export_items

    return {
        'header': parse_header(),
        'items': parse_items(),
    }
class EsiUtil:
    """Utilities for locating and indexing EtherCAT Slave Information (ESI)
    XML files: searching by vendor/product and building a SQLite index.

    NOTE(review): depends on project helpers YoUtil and EsiFile; several
    methods stash parse state on self (self.xml_esi, self.esi_path, self.con).
    """

    def __init__(self):
        pass

    def get_ESI_files_by_vendor(self, vendor_id):
        # Return paths of all ESI files whose <Vendor><Id> matches vendor_id.
        files = self.get_ESI_files()
        ret = list()
        for pair in files:
            file_path = pair[1]
            xml_esi = self.load_esi(file_path)
            if xml_esi != None:
                xml_vendor = xml_esi.find('Vendor')
                if xml_vendor != None:
                    xml_id = xml_vendor.find('Id')
                    if xml_id != None:
                        id = YoUtil.get_int(xml_id.text)
                        if id == vendor_id:
                            ret.append(file_path)
        return ret

    def get_ESI_files(self, vendor_id=None, productCode=None):
        # Collect (source-key, path) pairs for every .xml under the known ESI
        # folders; optionally filter by vendor id and/or product code.
        esi_folders = self.get_ESI_folders()
        esi_files = list()
        for pair in esi_folders:
            key = pair[0]
            folder = pair[1]
            files = YoUtil.get_list_of_files(folder, '.xml')
            for file in files:
                full_path = os.path.join(folder, file)
                esi_files.append((key, full_path))
        if vendor_id == None and productCode == None:
            return esi_files
        ret = list()
        for pair in esi_files:
            file_path = pair[1]
            esi_file = EsiFile(file_path)
            if esi_file != None:
                esi_file.load_vendor()
                if esi_file.id == vendor_id:
                    if productCode == None:
                        ret.append((pair[0], esi_file.path))
                    else:
                        # Need a device-level match as well.
                        esi_file.load_devices()
                        for d in esi_file.devices:
                            if d != None:
                                if d.product_code == productCode:
                                    ret.append((pair[0], file_path))
                                    break
        return ret

    def get_ESI_folders(self):
        # Known ESI search locations as (key, path) pairs.
        # NOTE(review): hard-coded Windows paths — environment-specific.
        ret = list()
        userESIPath = YoUtil.get_elmo_user_ESI_path()
        ret.append(('ElmoUserESI', userESIPath))
        ret.append(
            ('EASESI',
             'C:/Dev/eas/View/ElmoMotionControl.View.Main/EtherCATSlaveLib'))
        ret.append(('TwinCAT', 'C:/TwinCAT/3.1/Config/Io/EtherCAT'))
        return ret

    def create_esi_db(self, db_name):
        # (Re)build a SQLite index of all ESI files: one Vendors row per file
        # and one Devices row per device found in it. Returns True on success.
        try:
            if os.path.exists(db_name):
                os.remove(db_name)
                YoUtil.debug_print('DB file deleted:', db_name)
            self.con = sqlite3.connect(db_name)
            cur = self.con.cursor()
            # NOTE(review): fetchone() without a prior execute — presumably
            # meant to fetch "SELECT sqlite_version()"; returns None as-is.
            YoUtil.debug_print('SQLite version: ', cur.fetchone())
            with self.con:
                cur.execute(
                    "CREATE TABLE IF NOT EXISTS Vendors(VendorId INT, Name TEXT, Path TEXT, App TEXT)"
                )
                cur.execute(
                    "CREATE TABLE IF NOT EXISTS Devices(VendorId INT, productCode INT, revisionNumber INT, Name TEXT, Xml TEXT)"
                )
            files_pair = self.get_ESI_files()
            for pair in files_pair:
                esi_path = pair[1]
                vendor_id, vendor_name = self.get_ESI_info(esi_path)
                if vendor_id != 0:
                    with self.con:
                        cur.execute(
                            "INSERT INTO Vendors (VendorId,Name,Path,App) VALUES(?,?,?,?)",
                            (vendor_id, vendor_name, esi_path, pair[0]))
                    # Uses self.xml_esi set by get_ESI_info above.
                    device_list = self.get_ESI_devices()
                    for device in device_list:
                        with self.con:
                            cur.execute(
                                "INSERT INTO Devices (VendorId,productCode,revisionNumber,Name) VALUES(?,?,?,?)",
                                (vendor_id, device[0], device[1], device[2]))
            return True
        except sqlite3.Error as e:
            print('DB Error: ', e)
            return False
        finally:
            if self.con:
                self.con.close()

    def get_ESI_info(self, esi_path):
        # Parse an ESI file and return (vendor_id, vendor_name); side effect:
        # caches the parsed tree in self.xml_esi for get_ESI_devices().
        esi = EsiFile(esi_path)
        vendor_id = 0
        vendor_name = None
        self.esi_path = esi_path
        self.xml_esi = self.load_esi(esi_path)
        if self.xml_esi != None:
            xml_vendor = self.xml_esi.find('Vendor')
            if xml_vendor != None:
                xml_id = xml_vendor.find('Id')
                if xml_id != None:
                    vendor_id = YoUtil.get_int(xml_id.text)
                xml_name = xml_vendor.find('Name')
                if xml_name != None:
                    vendor_name = xml_name.text
        return vendor_id, vendor_name

    def get_ESI_devices(self):
        # Return (product_code, revision, name) tuples for every device in the
        # ESI tree previously loaded into self.xml_esi.
        ret = list()
        #devices
        if self.xml_esi != None:
            YoUtil.debug_print('read devices of esi=', self.esi_path)
            xml_list_device = self.xml_esi.findall(
                'Descriptions/Devices/Device')
            YoUtil.debug_print('num of devices in esi=', len(xml_list_device))
            for xml_device in xml_list_device:
                pc = None
                rev = None
                name = None  # NOTE(review): never populated — always None.
                xml_type = xml_device.find('Type')
                if xml_type != None:
                    msg1 = ''
                    if 'ProductCode' in xml_type.attrib.keys():
                        pc = YoUtil.get_int(xml_type.attrib['ProductCode'])
                        msg1 = msg1 + 'ProductCode=' + hex(pc)
                    if 'RevisionNo' in xml_type.attrib.keys():
                        rev = YoUtil.get_int(xml_type.attrib['RevisionNo'])
                        msg1 = msg1 + ' RevisionNo:' + hex(rev)
                    if len(msg1) > 0:
                        YoUtil.debug_print(msg1, '')
                if pc != None:
                    ret.append((pc, rev, name))
        return ret

    def get_devices(self, vendor_id, productCode, revisionNumber):
        # Return the paths of this vendor's ESI files that contain a device
        # with the given product code.
        # NOTE(review): revisionNumber is accepted but never used.
        ret = list()
        files = self.get_ESI_files_by_vendor(vendor_id)
        YoUtil.debug_print('num of files=', len(files))
        for file_path in files:
            xml_esi = self.load_esi(file_path)
            if xml_esi != None:
                xml_list_device = xml_esi.findall(
                    'Descriptions/Devices/Device')
                YoUtil.debug_print('num of devices=', len(xml_list_device))
                for xml_device in xml_list_device:
                    xml_type = xml_device.find('Type')
                    if xml_type != None:
                        pc = YoUtil.get_int(xml_type.attrib['ProductCode'])
                        YoUtil.debug_print('ProductCode=', pc)
                        if pc == productCode:
                            ret.append(file_path)
        return ret

    def load_esi(self, esi_path):
        # Parse an ESI XML file and return its root element (tree kept on self).
        self.tree = ET()
        self.tree.parse(esi_path)
        return self.tree.getroot()
class TreeReader:
    """Reads a TMVA-style BDT weight XML file and exposes its decision
    trees as nested dicts of {"info": {...}, "children": [...]}."""

    ## Standard Constructor
    # @param self object pointer
    # @oaran fileName path to XML file
    def __init__(self, fileName):
        self.__xmltree = ElementTree()
        self.__xmltree.parse(fileName)
        # Total tree count is stored as an attribute of the <Weights> element.
        self.__NTrees = int(self.__xmltree.find("Weights").get('NTrees'))

    ## Returns the number of trees
    # @param self object pointer
    def getNTrees(self):
        return (self.__NTrees)

    # Returns DOM object to selected tree
    # @param self object pointer
    # @param itree the index of tree
    def __getBinaryTree(self, itree):
        if self.__NTrees <= itree:
            print("to big number, tree number must be less then %s" %
                  self.__NTrees)
            return 0
        # XPath positional predicate is 1-based, hence itree + 1.
        return self.__xmltree.find("Weights").find("BinaryTree[" +
                                                   str(itree + 1) + "]")

    ## Reads the tree
    # @param self the object pointer
    # @param binaryTree the tree DOM object to be read
    # @param tree empty object, this will be filled
    # @param depth current depth
    def __readTree(self, binaryTree, tree=None, depth=0):
        # BUGFIX: the default used to be the mutable literal `{}`, which is
        # shared across calls in Python; use a None sentinel instead.
        if tree is None:
            tree = {}
        nodes = binaryTree.findall("Node")
        if len(nodes) == 0:
            return
        # A single node tagged pos="s" is the tree's root/start node.
        if len(nodes) == 1 and nodes[0].get("pos") == "s":
            info = {
                "IVar": nodes[0].get("IVar"),
                "Cut": nodes[0].get("Cut"),
                "purity": nodes[0].get("purity"),
                "pos": 0
            }
            tree["info"] = info
            tree["children"] = []
            self.__readTree(nodes[0], tree, 1)
            return
        # Otherwise each node is a child (left/right) of the current tree node.
        for node in nodes:
            info = {
                "IVar": node.get("IVar"),
                "Cut": node.get("Cut"),
                "purity": node.get("purity"),
                "pos": node.get("pos")
            }
            tree["children"].append({"info": info, "children": []})
            self.__readTree(node, tree["children"][-1], depth + 1)

    ## Public function which returns the specified tree object
    # @param self the object pointer
    # @param itree selected tree index
    def getTree(self, itree):
        binaryTree = self.__getBinaryTree(itree)
        if binaryTree == 0:
            # Out-of-range index: return an empty tree.
            return {}
        tree = {}
        self.__readTree(binaryTree, tree)
        return tree

    ## Returns a list with input variable names
    # @param self the object pointer
    def getVariables(self):
        varstree = self.__xmltree.find("Variables").findall("Variable")
        # Place each expression at its declared VarIndex position.
        variables = [None] * len(varstree)
        for v in varstree:
            variables[int(v.get('VarIndex'))] = v.get('Expression')
        return variables
#!/usr/bin/env python3 import add_disks from xml.etree.ElementTree import ElementTree, Element from typing import List tree = ElementTree() root = tree.parse("./platform_graphene_s1.xml") for i in range(17, 34): add_disks.add_host(root, '1.25E8Bps', '1.25E8Bps', "1.0E-4s", "16.673E9f", 4, "graphene-" + str(i) + ".nancy.grid5000.fr") header = '''<?xml version='1.0'?> <!DOCTYPE platform SYSTEM "http://simgrid.gforge.inria.fr/simgrid/simgrid.dtd">''' with open("./platform_graphene_s1_full.xml", 'wb') as f: f.write(header.encode('utf8')) tree.write(f, 'utf-8', xml_declaration=False)
def main(query):
    # Score the documents of an Indonesian hymn-book XML corpus against
    # *query* using an n-gram match and a tf-idf-style weight, returning the
    # matches sorted by descending score.
    # NOTE(review): relies on module-level helpers (remove_punc_tokenize,
    # to_lower, generate_ngrams) and third-party nltk/Sastrawi/sklearn.
    tree = ElementTree()
    tree.parse("apps/data/BukuNyanyianHKBP.xml")

    # Collect parallel lists of document ids, titles, and bodies.
    all_doc_no = []
    all_headline = []
    all_text = []
    for node in tree.iter("DOCNO"):
        all_doc_no.append(node.text)
    for node in tree.iter("HEADLINE"):
        all_headline.append(node.text)
    for node in tree.iter("TEXT"):
        all_text.append(node.text)

    N_DOC = len(all_text)

    # One searchable string per document: headline + body.
    all_sentence_doc = []
    for i in range(N_DOC):
        all_sentence_doc.append(all_headline[i] + all_text[i])

    # Tokenize, lowercase, drop Indonesian stopwords and digit-bearing tokens.
    tokens_doc = []
    for i in range(N_DOC):
        tokens_doc.append(remove_punc_tokenize(all_sentence_doc[i]))
    for i in range(N_DOC):
        tokens_doc[i] = to_lower(tokens_doc[i])
    stop_words = set(stopwords.words('indonesian'))
    stopping = []
    for i in range(N_DOC):
        temp = []
        for j in tokens_doc[i]:
            if j not in stop_words:
                temp.append(j)
        stopping.append(temp)
    for i in range(N_DOC):
        tokens_doc[i] = ([w for w in stopping[i]
                          if not any(j.isdigit() for j in w)])

    # Stem every remaining token (Sastrawi Indonesian stemmer).
    factory = StemmerFactory()
    stemmer = factory.create_stemmer()
    stemming = []
    for i in range(N_DOC):
        temp = []
        for j in tokens_doc[i]:
            temp.append(stemmer.stem(j))
        stemming.append(temp)

    # Build the corpus vocabulary.
    all_tokens = []
    for i in range(N_DOC):
        for w in stemming[i]:
            all_tokens.append(w)
    new_sentence = ' '.join([w for w in all_tokens])
    for w in CountVectorizer().build_tokenizer()(new_sentence):
        all_tokens.append(w)
    all_tokens = set(all_tokens)
    alls = []
    for i in all_tokens:
        alls.append(i)

    # Normalize the query through the same pipeline: drop numeric tokens,
    # strip punctuation, lowercase, remove stopwords, stem.
    queri = []
    spl = query.split()
    for i in range(len(spl)):
        if not spl[i].isdigit():
            queri.append(spl[i])
    punc = []
    for i in range(len(queri)):
        no_punc = ""
        for j in range(len(queri[i])):
            if queri[i][j] not in string.punctuation:
                no_punc = no_punc + queri[i][j]
        punc.append(no_punc)
    lower = []
    for i in range(len(punc)):
        lower.append(punc[i].lower())
    stop = []
    for i in range(len(lower)):
        if lower[i] not in stop_words:
            stop.append(lower[i])
    stem = []
    for i in range(len(stop)):
        stem.append(stemmer.stem(stop[i]))
    join_word = ' '.join([w for w in stem])

    # Generate n-grams of the query's length and index which documents
    # contain each n-gram.
    ngram, ngram_doc = generate_ngrams(stemming, len(stem))
    n_gram_index = {}
    for ngram_token in ngram:
        doc_no = []
        for i in range(N_DOC):
            if (ngram_token in ngram_doc[i]):
                doc_no.append(all_doc_no[i])
        n_gram_index[ngram_token] = doc_no

    # Per-document frequency of the full query n-gram.
    df = []
    for i in range(N_DOC):
        count = 0
        for j in range(len(ngram_doc[i])):
            if join_word == ngram_doc[i][j]:
                count += 1
        df.append(count)
    idf = []
    for i in range(len(df)):
        try:
            idf.append(math.log10(N_DOC / df[i]))
        except ZeroDivisionError:
            # NOTE(review): appends the *string* '0' into a numeric list;
            # such entries would break score *= idf[i] if ever reached.
            idf.append(str(0))

    #w(t, d)
    #t = term
    #d = document
    wtd = []
    l = []
    for i in range(N_DOC):
        dic = {}
        tf = ngram_doc[i].count(join_word)  # compute the tf value
        if tf != 0:
            score = math.log10(tf)  #log10(tf(t,d))
            score += 1  # 1 + log(tf(t,d))
            score *= idf[i]  #tf * idf
            idx = all_doc_no[i]
            judul = all_headline[i]
            dic['docno'] = idx
            dic['judul'] = judul
            dic['score'] = score
            l.append(dic)
        wtd.append(l)
    # [i+1] = document number; score = wtd
    # print(score)

    # Sort matches by score, best first.
    hasil = []
    hasil.append(sorted(wtd[0], key=lambda x: x['score'], reverse=True))
    return hasil
def main(argv):
    # CLI entry point: read a KiCad-style XML netlist, extract component
    # data (optionally grouped and priced), and emit both a CSV and an XML
    # bill of materials.
    # NOTE(review): Python 2 conventions (csv file opened 'wb', manual utf-8
    # encoding of cell values).
    global pricingService
    global apiKey

    #Default values - will change these
    fileIn = ''
    fileOut = ''
    groupParts = False
    pricingService = ''  #Which pricing service to use (currently 'F' = FindChips)

    #####
    # COMMENT OUT FOR COMMAND LINE - THIS IS HERE FOR DEVELOPMENT TESTING ONLY
    #####
    # fileIn = ''     #replace with filename
    # groupParts = True
    # apiKey = ''     #Put your API key here
    # pricingService= 'F'
    # fileOut = ''    #replace with filename
    ######

    ########################################################
    #Process the Command Line
    logger.info('Command Line Arguments: %s', str(argv))
    try:
        opts, args = getopt.getopt(
            argv, "hgfa:i:o:",
            ["help", "group", "apikey=", "input=", "output="])
    except getopt.GetoptError:
        printUsage()
        logger.error('Invalid argument list provided')
        sys.exit(2)
    for opt, arg in opts:
        if opt in ("-i", "--input"):
            fileIn = arg
        if opt in ("-o", "--output"):
            fileOut = arg
        if opt in ("-a", "--apikey"):
            apiKey = arg
        if opt in ("-f", "--findchips"):
            pricingService = opt
        if opt in ("-g", "--group"):
            groupParts = True
        if opt in ("-h", "--help"):
            printUsage()

    #Perform error checking to make sure cmd-line params passed, files exist etc.
    checkParams(fileIn)

    #If no output file specified, create one:
    if fileOut == '':
        fileOut = fileIn[:fileIn.rfind(
            '.')] + '_BOM'  #strip the extension off the Input file
        logger.info('No OUTPUT Filename specified - using: %s', fileOut)

    ########################################################
    #Parse and process the XML netlist
    eTree = ElementTree()
    eTree.parse(fileIn)
    root = eTree.getroot()
    section = root.find(
        tagComponent)  #Find the component-level in the XML Netlist

    #Loop through each component
    for component in section:
        processComponent(
            listOutput, component,
            groupParts)  #Extract compoonent info and add to listOutput

    #Get Pricing if we need to. Need a Manufacturer Part No, and a Pricing
    #Service to be specified
    if fldMfgPartNo in CSVFieldNames and pricingService != '':
        getPricing(listOutput)

    ########################################################
    #Generate the Output CSV File
    with open(fileOut + '.csv', 'wb') as fOut:
        csvWriter = csv.DictWriter(fOut, delimiter=',',
                                   fieldnames=CSVFieldNames)
        csvWriter.writeheader()
        # Encode every cell as utf-8 bytes before handing rows to csv.
        utf8Output = []
        for row in listOutput:
            utf8Row = {}
            for key in row:
                utf8Row[key] = row[key].encode('utf-8')
            utf8Output.append(utf8Row)
        csvWriter.writerows(utf8Output)

    #Print and Log the file creation
    print('Created CSV File')
    logger.info('Created CSV file with %i items: %s', len(listOutput),
                fileOut + '.csv')

    ########################################################
    #Generate the Output XML File
    parent = Element('schematic')  #Create top-level XML element

    #Loop through each Component and create a child of the XML top-level
    for listItem in listOutput:
        child = SubElement(parent, 'component')
        #Loop through each attribute of the component.
        # We do it this way, in order to preserve order of elements - more logical output
        for key in CSVFieldNames:
            if key in listItem:
                attribute = SubElement(
                    child, key.replace(' ', '_').replace('&', '_')
                )  #XML doesn't like spaces in element names, so replace them with "_"
                attribute.text = listItem[key]

    #Output to XML file
    ET = ElementTree(parent)
    ET.write(fileOut + '.xml')

    #Print and Log the file creation
    print('Created XML File')
    logger.info('Created XML file with %i items: %s', len(listOutput),
                fileOut + '.xml')

    #Close the log file - we're done!
    logging.shutdown()
class Beauti:
    """Populate a BEAST XML template with sequence data and MCMC settings,
    and post-process BEAST log / tree-log output.

    NOTE(review): Python 2 code (`iteritems`, `getchildren`, StringIO) and
    depends on module-level Tree/Node (ElementTree aliases), Phylo, re,
    subprocess, math.
    """

    def __init__(self, template_file):
        # Parse the BEAST template once; populate() mutates it in place.
        self.template = Tree()
        _ = self.template.parse(template_file)

    def populate(self, fasta, stem, time_unit='days', chain_length=None,
                 screen_step=None, log_step=None, treelog_step=None,
                 root_height=None):
        """ Load sequences from FASTA object into BEAST XML template
        :param fasta: a Python list object containing sublists of header/sequence pairs
        :param stem: file path and prefix to write *.log and *.tree files
        :param time_unit: used by BEAST for annotation only (e.g., days, years)
        :param chain_length: optional setting for number of steps in MCMC chain
        :return: paths to BEAST log and tree log files
        """
        logfile = stem + '.log'
        treefile = stem + '.trees'

        # reset TAXA and ALIGNMENT blocks
        # NOTE(review): mutating the private _children list is an
        # implementation detail of the old ElementTree API.
        t_taxa = self.template.findall('taxa')[0]
        t_taxa._children = []
        t_aln = self.template.find('alignment')
        t_aln._children = []

        for k, v in fasta.iteritems():
            h = v['header']
            s = v['sequence']
            date_val = float(v['days'])
            date = Node(
                'date', {
                    'units': time_unit,
                    'direction': 'forwards',
                    'value': str(date_val)
                })
            # TAXA
            taxon = Node('taxon', {'id': h})
            taxon.append(date)
            t_taxa.append(taxon)

            # SEQUENCE
            seqtag = Node('sequence', {})
            staxon = Node('taxon', {'idref': h})
            staxon.tail = '\n\t\t\t' + s.upper(
            )  # mimic formatting in BEAST XML
            seqtag.append(staxon)
            t_aln.append(seqtag)

        # revise log settings
        t_mcmc = self.template.find('mcmc')
        if chain_length:
            t_mcmc.set('chainLength',
                       str(int(chain_length)))  # number of MCMC steps

        # set prior distribution for rootheight
        if root_height and type(root_height) is tuple and len(
                root_height) == 2:
            lower, upper = root_height
            assert lower <= upper, 'Root height prior specification lower must be <= upper.'
            #
            # set the uniform prior
            priors = t_mcmc.find('posterior').find('prior').getchildren()
            found = False
            for prior in priors:
                parameter = prior.find('parameter')
                if parameter is None or parameter.get(
                        'idref') != 'treeModel.rootHeight':
                    continue
                found = True
                prior.set('lower', str(lower))
                prior.set('upper', str(upper))
            if not found:
                # TODO: create new element
                # NOTE(review): this Node is built but never attached to the
                # tree — the new prior is silently dropped. Confirm intent.
                prior = Node('uniformPrior')
                prior.set('idref', 'treeModel.rootHeight')
                prior.set('lower', str(lower))
                prior.set('upper', str(upper))

            # rescale starting tree to be compatible with this prior
            t_tree = self.template.find('rescaledTree')
            t_tree.set('height', str(0.1 * (upper - lower) + lower))

        # Point the file/screen logs at our output paths and intervals.
        for log in t_mcmc.findall('log'):
            if log.get('id') == 'fileLog':
                log.set('fileName', logfile)
                if log_step:
                    log.set('logEvery', str(int(log_step)))
            elif log.get('id') == 'screenLog':
                if screen_step:
                    log.set('logEvery', str(int(screen_step)))

        log_tree_element = t_mcmc.find('logTree')
        log_tree_element.set('fileName', treefile)
        if treelog_step:
            log_tree_element.set('logEvery', str(int(treelog_step)))
        return logfile, treefile

    def write(self, handle):
        # Serialize the (possibly populated) template to an open file handle.
        self.template.write(handle)

    def parse_log(self, handle, mod=1):
        """
        Parse contents of a BEAST log file
        :param handle: an open file handle
        :param mod: keep only every mod-th sample row
        :return: dict mapping column name -> list of values
        """
        keys = None
        result = {}
        count = 0
        for line in handle:
            if line.startswith('#'):
                # ignore comment line
                continue
            if not result:
                # reached the first non-comment line: it is the header row
                keys = line.strip('\n').split('\t')
                result = dict([(key, []) for key in keys])
                continue
            if count % mod == 0:
                values = line.strip('\n').split('\t')
                for i, k in enumerate(keys):
                    v = values[i]
                    # 'state' is the integer step counter; all else is float.
                    result[k].append(int(v) if k == 'state' else float(v))
            count += 1
        return result

    def parse_treelog(self, handle, sample_size):
        """
        Extract sampled trees from BEAST tree log and convert into Newick
        tree strings.
        :param handle: open file handle to the tree log
        :param sample_size: approximate number of trees to keep
        :return: list of Newick strings
        """
        # use grep to figure out how many trees are in the file
        p = subprocess.Popen(['grep', '--count', '-e', '^tree', handle.name],
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        stdout, stderr = p.communicate()
        ntrees = int(stdout.strip('\n'))

        # figure out a modulus to get target sample
        # NOTE(review): Python 2 integer division assumed here.
        mod = max(ntrees / sample_size, 1)

        trBlock = False  # inside the NEXUS "Translate" taxa block
        label_d = {}  # numeric index -> taxon label
        newicks = []
        tree_count = 0
        for line in handle:
            if 'Translate' in line:
                # entered taxa block
                trBlock = True
                continue
            if trBlock:
                if ';' in line:
                    # end of block
                    trBlock = False
                    continue
                index, label = line.strip(',\n').split()[-2:]
                label_d.update({index: label})
                continue

            # this should follow the Translate block
            if line.startswith('tree'):
                tree_count += 1
                if tree_count % mod == 0:
                    tree = line.split()[-1].strip(';')
                    # remove figtree annotations
                    tree1 = re.sub('\[[^]]+\]', '', tree)
                    # replace all indices with labels
                    tree2 = re.sub(
                        '([0-9]+):',
                        lambda x: label_d[x.groups()[0]] + ':', tree1)
                    newicks.append(tree2)
                continue
        return newicks

    def max_credible(self, newicks):
        """
        Return the maximum clade crediblity tree
        :param newicks: a list of Newick tree strings
        :return: the Newick string whose clades are, in aggregate, most
                 frequent across the sample
        """

        def get_tips(tree):
            # Sorted tuple of tip names — hashable clade identifier.
            return tuple(
                sorted([tip.name for tip in tree.root.get_terminals()]))

        def get_clades(tree):
            # recursive function to collect all monophyletic clades of tips
            result = [get_tips(tree)]
            for subtree in tree.root:
                if not subtree.is_terminal():
                    result.extend(get_clades(subtree))
            return result

        ntrees = len(newicks)
        counts = {}  # clade -> fraction of trees containing it
        clades_cache = {}  # newick string -> its clades
        for newick in newicks:
            handle = StringIO(newick)
            tree = Phylo.read(handle, 'newick')
            clades = get_clades(tree)
            for clade in clades:
                if clade not in counts:
                    counts.update({clade: 0.})
                counts[clade] += 1. / ntrees
            clades_cache.update({newick: clades})

        def credibility(clades, counts):
            # Sum of log clade frequencies — higher is more credible.
            return sum(math.log(counts[clade]) for clade in clades)

        return max(clades_cache.keys(),
                   key=lambda t: credibility(clades_cache[t], counts))
def generate_basis(self, symbol):
    """
    Author: "Kittithat (Mick) Krongchon" <*****@*****.**> and Lucas K. Wagner

    Returns a string containing the basis section.  It is modified according
    to a simple recipe:
    1) The occupied atomic orbitals are kept, with exponents less than
       'cutoff' removed.
    2) These atomic orbitals are augmented with uncontracted orbitals
       according to the formula e_i = params[0]*params[2]**i, where i goes
       from 0 to params[1].
       These uncontracted orbitals are added for every occupied atomic
       orbital (s,p for most elements and s,p,d for transition metals)

    Args:
        symbol (str): The symbol of the element to be specified in the
            D12 file.

    Returns:
        list of str: the pseudopotential and basis section lines.

    Uses the following member variables:
        xml_name (str): The name of the XML pseudopotential and basis set
            database.
        cutoff: smallest allowed exponent
        params: parameters for generating the augmenting uncontracted
            orbitals
        initial_charges
    """
    maxorb = 3  # NOTE(review): set (and bumped for transition metals) but never read below
    basis_name = "vtz"
    # how many contractions of each angular momentum to keep
    nangular = {"s": 1, "p": 1, "d": 1, "f": 1, "g": 0}
    # maximum electron occupancy per shell type
    maxcharge = {"s": 2, "p": 6, "d": 10, "f": 15}
    # CRYSTAL D12 shell-type codes per angular momentum
    basis_index = {"s": 0, "p": 2, "d": 3, "f": 4}
    transition_metals = [
        "Sc", "Ti", "V", "Cr", "Mn", "Fe", "Co", "Ni", "Cu", "Zn"
    ]
    if symbol in transition_metals:
        maxorb = 4
        nangular['s'] = 2  # keep both 3s-like and 4s-like contractions
    tree = ElementTree()
    tree.parse(self.xml_name)
    element = tree.find('./Pseudopotential[@symbol="{}"]'.format(symbol))
    atom_charge = int(element.find('./Effective_core_charge').text)
    # a positive initial charge removes electrons from the neutral atom
    if symbol in self.initial_charges.keys():
        atom_charge -= self.initial_charges[symbol]
    basis_path = './Basis-set[@name="{}"]/Contraction'.format(basis_name)
    found_orbitals = []  # angular momenta already emitted (with repeats)
    totcharge = 0        # electrons assigned so far
    ret = []             # accumulated D12 basis lines
    ncontract = 0        # number of contractions emitted
    for contraction in element.findall(basis_path):
        angular = contraction.get('Angular_momentum')
        # skip contractions beyond the per-channel quota
        if found_orbitals.count(angular) >= nangular[angular]:
            continue
        #Figure out which coefficients to print out based on the minimal exponent
        nterms = 0
        basis_part = []
        for basis_term in contraction.findall('./Basis-term'):
            exp = basis_term.get('Exp')
            coeff = basis_term.get('Coeff')
            # drop primitives with exponents at or below the cutoff
            if float(exp) > self.cutoff:
                basis_part += ['  {} {}'.format(exp, coeff)]
                nterms += 1
        #now write the header
        if nterms > 0:
            found_orbitals.append(angular)
            charge = min(atom_charge - totcharge, maxcharge[angular])
            #put in a special case for transition metals:
            #depopulate the 4s if the atom is charged
            if symbol in transition_metals and symbol in self.initial_charges.keys() \
                    and self.initial_charges[symbol] > 0 \
                    and found_orbitals.count(angular) > 1 \
                    and angular == "s":
                charge = 0
            totcharge += charge
            # D12 shell header: type=0 (general), shell code, nterms, occupancy, scale=1
            ret += [
                "0 %i %i %g 1" % (basis_index[angular], nterms, charge)
            ] + basis_part
            ncontract += 1

    #Add in the uncontracted basis elements
    angular_uncontracted = ['s', 'p']
    if symbol in transition_metals:
        angular_uncontracted.append('d')

    for angular in angular_uncontracted:
        for i in range(0, self.basis_params[1]):
            # even-tempered series: e_i = params[0] * params[2]**i
            exp = self.basis_params[0] * self.basis_params[2]**i
            line = '{} {}'.format(exp, 1.0)
            ret += ["0 %i %i %g 1" % (basis_index[angular], 1, 0.0), line]
            ncontract += 1

    # atomic number + 200 flags "atom with pseudopotential" in CRYSTAL input
    return ["%i %i"%(Element(symbol).number+200,ncontract)] +\
        self.pseudopotential_section(symbol) +\
        ret
path = "%s/res" % (input) files = os.listdir(path) if len(files) == 0: exit(1) exitCode = 0 for file in files: root, extension = os.path.splitext(file) if (extension != ".xml"): continue xml = "%s/%s" % (path, file) tree = ElementTree() try: tree.parse(xml) if (job_type == "analysis"): ok, lfn = readXMLAnalysis(tree) else: ok, lfn = readXMLPublish(tree) if (not ok): exitCode = 1 continue absoluteLFN = lfnPrefix % lfn if (checkRFIO): exists = os.system("rfdir \"%s\" &> /dev/null" % absoluteLFN) if (exists != 0): sys.stderr.write( "[%s] Error: file %s does not exists. Skipping it.\n" % (input, absoluteLFN))
def get_module_name(pom_file): tree = ElementTree() tree.parse(pom_file) return tree.findtext("./{%s}artifactId" % maven_pom_xml_namespace)
#!/usr/bin/env python3 from sys import stdin, stdout, argv from xml.etree.ElementTree import ElementTree import xml tree = ElementTree() #xml.etree.ElementTree.register_namespace("","http://www.w3.org/2005/Atom") tree.parse(stdin) if argv[1] == '--whitelist': whitelist = True keywordID = argv[2:] else: whitelist = False keywordID = argv[1:] root = tree.getroot() for channel in tree.findall('channel'): for node in tree.findall('.//item'): ch = node.find('title') if ch is not None: node_matched = False for keyword in keywordID: if ch.text.find(keyword) != -1: node_matched = True break if node_matched ^ whitelist: channel.remove(node) tree.write(stdout, encoding='unicode')
def parse_wp_xml(file): parser = ns_tracker_tree_builder() tree = ElementTree() print "reading: " + wpe root = tree.parse(file, parser) ns = parser.namespaces ns[''] = '' c = root.find('channel') def parse_header(): return { "title": unicode(c.find('title').text), "link": unicode(c.find('link').text), "description": unicode(c.find('description').text) } def parse_items(): export_items = [] xml_items = c.findall('item') for i in xml_items: taxanomies = i.findall('category') export_taxanomies = {} for tax in taxanomies: if not "domain" in tax.attrib: continue t_domain = unicode(tax.attrib['domain']) t_entry = unicode(tax.text) if (not (t_domain in taxonomy_filter) and not (t_domain in taxonomy_entry_filter and taxonomy_entry_filter[t_domain] == t_entry)): if not t_domain in export_taxanomies: export_taxanomies[t_domain] = [] export_taxanomies[t_domain].append(t_entry) def gi(q, unicode_wrap=True): namespace = '' tag = '' if q.find(':') > 0: namespace, tag = q.split(':', 1) else: tag = q try: result = i.find(ns[namespace] + tag).text print result except AttributeError: result = "No Content Found" if unicode_wrap: result = unicode(result) return result body = gi('content:encoded') for key in body_replace: body = body.replace(key, body_replace[key]) img_srcs = [] if body is not None: try: soup = BeautifulSoup(body) img_tags = soup.findAll('img') for img in img_tags: img_srcs.append(img['src']) except: print "could not parse html: " + body #print img_srcs export_item = { 'title': gi('title'), 'date': gi('wp:post_date'), 'slug': gi('wp:post_name'), 'status': gi('wp:status'), 'type': gi('wp:post_type'), 'wp_id': gi('wp:post_id'), 'parent': gi('wp:post_parent'), 'comments': gi('wp:comment_status') == u'open', 'taxanomies': export_taxanomies, 'body': body, 'img_srcs': img_srcs } export_items.append(export_item) return export_items return { 'header': parse_header(), 'items': parse_items(), }
def read_xml(in_path): '''读取并解析xml文件''' tree = ElementTree() tree.parse(in_path) return tree
if elem.tag.startswith(ns): elem.tag = elem.tag[nsl:] wrap = OAuthWrapper() #response = wrap.request('http://fantasysports.yahooapis.com/fantasy/v2/game/nfl/stat_categories') #response = wrap.request('http://fantasysports.yahooapis.com/fantasy/v2/users;use_login=1/games;game_keys=nfl/leagues') players = [] for start in range(0, 800, 25): response = wrap.request( 'http://fantasysports.yahooapis.com/fantasy/v2/league/nfl.l.15740/players;sort=OR;count=25;start={0}/' .format(start)) tree = ElementTree() tree.parse(response) remove_namespace(tree.getroot(), 'http://fantasysports.yahooapis.com/fantasy/v2/base.rng') for (rank, player) in enumerate(tree.findall('league/players/player'), 1): player_data = { 'yahoo_id': int(player.findtext('player_id')), 'rank': start + rank, 'name': player.findtext('name/full'), 'team': player.findtext('editorial_team_abbr'), 'bye': int(player.findtext('bye_weeks/week')), 'position': player.findtext('display_position'), } players.append(player_data) with open('players.json', 'w') as f:
class importOMS():
    """Import an OMS XML export into the model database.

    Python 2 code. Loads an XML file with loadFile(), then __write() walks
    the domain/model/concept/property/foreign hierarchy and persists it via
    the project's Model/Entity/Property/Relationship ORM classes.
    """

    def __init__(self, userProfile):
        self.__filename = ""
        self.project = None
        self.__tree = None
        # Log handling
        self.__logger = logging.getLogger("Convert XML Database")
        self.__logger.setLevel(logging.DEBUG)
        formatter = logging.Formatter('[%(levelname)s] %(message)s')
        handler = logging.StreamHandler()
        handler.setFormatter(formatter)
        self.__logger.addHandler(handler)
        # Error constants
        # NOTE(review): ERR0R_PARSE_XML is spelled with a zero — kept as-is
        # since callers may reference it.
        self.OK = 0
        self.ERROR_OPEN_FILE = 1
        self.ERR0R_PARSE_XML = 2
        self.OPERATIONAL_ERROR = 3
        self.ADDING_ERROR = 4
        self.ERROR = 5
        self.userProfile = userProfile

    # filename must be an XML file
    def loadFile(self, filename):
        """Parse `filename` into self.__tree; return an error constant."""
        # keep the parsed tree on the instance for later __write()
        self.__tree = ElementTree()
        # Logging info
        self.__logger.info("Chargement du fichier...")
        # self.__tree.parse(filename)
        try:
            self.__tree.parse(filename)
            self.__filename = filename
        except IOError:
            self.__logger.critical("Impossible d ouvrir le fichier...")
            return self.ERROR_OPEN_FILE
        except:
            # any other parse failure (e.g. malformed XML)
            self.__logger.critical("Erreur de traitement fichier...")
            return self.ERROR
        # Logging info
        self.__logger.info("Chargement du fichier effectue...")
        return self.OK

    def __write(self):
        """Walk the loaded tree and persist models, entities, properties
        and relationships; returns a state dict on success, None on error."""
        # Logging info
        self.__logger.info("Ecriture dans la base de donnee...")
        # Tags copied verbatim (character data) into each ORM object:
        fdsModel = ('code', 'category', 'modelPrefix',)
        fdsEntity = ('code',)
        # fdsProperty = ( 'code', 'alias', 'physicalName', 'foreignEntity' )
        fdsProperty = ('code',)
        # Tags converted to booleans via toBoolean():
        booProperty = ('isPrimary', 'isNullable', 'isRequired', 'isSensitive',
                       'isEssential',)
        fdsRelationship = ('code', 'baseMin', 'baseMax', 'refMin', 'refMax',)
        # needed for setSecurityInfo
        data = {}
        # We populate the database
        if (self.__tree != None):
            # A file has been loaded
            xProjects = self.__tree.getiterator("domain")
            # -- models -------------------------------------------------------
            xModels = xProjects[0].getiterator("model")
            for xModel in xModels:
                dModel = Model()
                dModel.project = self.project
                # NOTE(review): modelUdps is collected but never used.
                modelUdps = []
                for child in xModel:
                    if child.tag in fdsModel:
                        setattr(dModel, child.tag, child.text)
                    elif child.tag == 'udps':
                        for xUdp in child:
                            modelUdps.append((xUdp.tag, xUdp.get('text')))
                try:
                    setSecurityInfo(dModel, data, self.userProfile, True )
                    dModel.save()
                except:
                    self.__logger.info("Error dModel.save")
                    return
                self.__logger.info("Model..." + dModel.code)
                # -- entities (concepts) --------------------------------------
                xEntitys = xModel.getiterator("concept")
                for xEntity in xEntitys:
                    dEntity = Entity()
                    dEntity.model = dModel
                    for child in xEntity:
                        if (child.tag in fdsEntity):
                            if (child.text is not None):
                                setattr(dEntity, child.tag, child.text)
                        elif (child.tag == 'physicalName'):
                            setattr(dEntity, 'dbName' , child.text)
                    try:
                        setSecurityInfo(dEntity, data, self.userProfile, True )
                        dEntity.save()
                    except:
                        self.__logger.info("Error dEntity.save")
                        return
                    self.__logger.info("Entity..." + dEntity.code)
                    # -- properties -------------------------------------------
                    xProperties = xEntity.getiterator("property")
                    for xProperty in xProperties:
                        dProperty = Property()
                        dProperty.entity = dEntity
                        for child in xProperty:
                            if child.tag in fdsProperty:
                                if (child.text is not None):
                                    setattr(dProperty, child.tag, child.text)
                            elif child.tag in booProperty:
                                bValue = toBoolean(child.text)
                                setattr(dProperty, child.tag, bValue)
                        try:
                            setSecurityInfo(dProperty, data, self.userProfile, True )
                            dProperty.save()
                        except:
                            self.__logger.info("Error prpDom.save")
                            return
                    # -- relationships (foreign keys) -------------------------
                    xForeigns = xEntity.getiterator("foreign")
                    for xForeign in xForeigns:
                        dForeign = Relationship()
                        dForeign.entity = dEntity
                        # NOTE(review): refEntity is set to the same entity as
                        # `entity` — presumably resolved properly later, or a
                        # placeholder; verify.
                        dForeign.refEntity = dEntity
                        for child in xForeign:
                            if child.tag in fdsRelationship:
                                setattr(dForeign, child.tag, child.text)
                            elif (child.tag == 'baseConcept'):
                                setattr(dForeign, 'dbName' , child.text)
                            elif (child.tag == 'alias'):
                                setattr(dForeign, 'relatedName' , child.text)
                            elif child.tag in booProperty:
                                bValue = toBoolean(child.text)
                                setattr(dForeign, child.tag, bValue)
                        try:
                            setSecurityInfo(dForeign, data, self.userProfile, True )
                            dForeign.save()
                        except Exception, e:
                            self.__logger.info("Error dForeign.save" + str(e))
                            return
        # Logging info
        self.__logger.info("Ecriture dans la base de donnee effectuee...")
        return {'state':self.OK, 'message': 'Ecriture effectuee'}
def parse_meta(xml_file):
    """Parse a WorldView (WV01/WV02/WV03) image metadata XML file.

    :param xml_file: path to the vendor XML containing IMD and RPB sections
    :return: dict with 'rpc' (RPC coefficients, scales and offsets),
        image 'height'/'width', 'capTime' (datetime), sun/satellite
        angles, 'cloudCover' and 'sensor_id'
    :raises ValueError: if the SATID is not a WorldView satellite
    """
    rpc_dict = {}
    tree = ElementTree()
    tree.parse(xml_file)

    b = tree.find('IMD/IMAGE/SATID')
    # WorldView
    if b.text not in ['WV01', 'WV02', 'WV03']:
        raise ValueError('not a WorldView satellite!')

    im = tree.find('RPB/IMAGE')

    # RPC numerator/denominator coefficient lists (row = line, col = sample)
    l = im.find('LINENUMCOEFList/LINENUMCOEF')
    rpc_dict['rowNum'] = [float(c) for c in l.text.split()]
    l = im.find('LINEDENCOEFList/LINEDENCOEF')
    rpc_dict['rowDen'] = [float(c) for c in l.text.split()]
    l = im.find('SAMPNUMCOEFList/SAMPNUMCOEF')
    rpc_dict['colNum'] = [float(c) for c in l.text.split()]
    l = im.find('SAMPDENCOEFList/SAMPDENCOEF')
    rpc_dict['colDen'] = [float(c) for c in l.text.split()]
    # self.inverseBias = float(im.find('ERRBIAS').text)

    # scale and offset
    rpc_dict['rowOff'] = float(im.find('LINEOFFSET').text)
    rpc_dict['rowScale'] = float(im.find('LINESCALE').text)

    rpc_dict['colOff'] = float(im.find('SAMPOFFSET').text)
    rpc_dict['colScale'] = float(im.find('SAMPSCALE').text)

    rpc_dict['latOff'] = float(im.find('LATOFFSET').text)
    rpc_dict['latScale'] = float(im.find('LATSCALE').text)

    rpc_dict['lonOff'] = float(im.find('LONGOFFSET').text)
    rpc_dict['lonScale'] = float(im.find('LONGSCALE').text)

    rpc_dict['altOff'] = float(im.find('HEIGHTOFFSET').text)
    rpc_dict['altScale'] = float(im.find('HEIGHTSCALE').text)

    # meta dict
    meta_dict = {'rpc': rpc_dict}

    # image dimensions
    meta_dict['height'] = int(tree.find('IMD/NUMROWS').text)
    meta_dict['width'] = int(tree.find('IMD/NUMCOLUMNS').text)

    # date string is in ISO format (requires third-party dateutil)
    meta_dict['capTime'] = dateutil.parser.parse(
        tree.find('IMD/IMAGE/TLCTIME').text)

    # sun direction (degrees — presumably azimuth/elevation; confirm units)
    meta_dict['sunAzim'] = float(tree.find('IMD/IMAGE/MEANSUNAZ').text)
    meta_dict['sunElev'] = float(tree.find('IMD/IMAGE/MEANSUNEL').text)

    # satellite direction
    meta_dict['satAzim'] = float(tree.find('IMD/IMAGE/MEANSATAZ').text)
    meta_dict['satElev'] = float(tree.find('IMD/IMAGE/MEANSATEL').text)

    # cloudless or not
    meta_dict['cloudCover'] = float(tree.find('IMD/IMAGE/CLOUDCOVER').text)

    meta_dict['sensor_id'] = tree.find('IMD/IMAGE/SATID').text

    return meta_dict
def create_xml(filepath): et = ElementTree() return et.parse(source=filepath)
only_cutlist=[] # only_cutlist.append("yanjing1") # only_cutlist.append("harbin31") # only_cutlist.append("snow10") #only_cutlist.append("snow129") only_cutlist.append("laoshan5") files = os.listdir(xmldir) i = 1 for filename in files: print str(i) + "\t" + filename + "\n" i = i + 1 if filename[-3:]!='xml' : continue xmldata = ElementTree() xmldata.parse(xmldir+filename) if os.path.exists(jpgdir + filename[0:-4] + ".jpg"): img = Image.open(jpgdir + filename[0:-4] + ".jpg") elif os.path.exists(jpgdir + filename[0:-4] + ".jpeg"): img = Image.open(jpgdir + filename[0:-4] + ".jpeg") else: print jpgdir + filename[0:-4] + ".jpg:"+" No that image!\n" nodelist = xmldata.findall("object") for n in range(len(nodelist)): nodename = nodelist[n].find("name").text nodename = nodename.replace("*","-") dirname = patchdir + nodename + "/" # if nodename not in only_cutlist: # continue if os.path.isdir(dirname): nodexy = nodelist[n].findall("bndbox")
def import_orders(self): tree = ElementTree.parse(self.filename) for doc in tree.getroot().findall(u'Документ'): self.import_order(doc)
def read_svg(path): tree = ElementTree() tree.parse(path) return tree