def pmc_article_search():
    """Search PubMed Central for electrophysiology-related articles.

    Pages through the NCBI esearch results ``retmax`` ids at a time and
    collects every returned article id.

    Returns:
        list[int]: article IDs matching the hard-coded search string.
    """
    id_list = []
    search_string = '((neuron electrophysiology) OR (neurophysiology) OR ("input resistance") OR ("resting potential" OR "resting membrane potential") OR "LTP" OR "synaptic plasticity" OR "LTD")'
    search_string_quoted = quote(search_string, safeChars)
    retstart = 0
    retmax = 20
    link = esearch % (search_string_quoted, retstart, retmax)
    req = Request(link)
    handle = urlopen(req)
    data = handle.read()
    xml = XML(data)
    # NOTE(review): the total hit count is hard-coded to one page (20);
    # restore int(xml.find('.//Count').text) to page through all hits.
    num_found_articles = 20
    while retstart < num_found_articles:
        link = esearch % (search_string_quoted, retstart, retmax)
        req = Request(link)
        handle = urlopen(req)
        data = handle.read()
        xml = XML(data)
        id_list_temp = xml.findall(".//Id")
        # BUG FIX: the original fell back to int(xml.findall(".//Id")) when
        # the page was empty, which raises TypeError (int() of a list).
        # An empty page simply contributes no ids.
        for id_elem in id_list_temp:
            id_list.append(int(id_elem.text))
        retstart += retmax
    return id_list
def protocolParser(protocol,game,conn): print protocol xmlelement=XML(protocol) print xmlelement.tag if(xmlelement.tag=="CLOGIN"): a_lst = xmlelement.findall("username") val='' for node in a_lst: val=node.attrib["usr"] print val threading.Thread(target=srvau(val,game,conn)).start() elif(xmlelement.tag=="CPLAY"): a_lst = xmlelement.findall("username") print 'a_lst' val='' for node in a_lst: val=node.attrib["usr"] print val threading.Thread(target=splay(val,game,conn)).start() elif(xmlelement.tag=="CDICE"): a_lst = xmlelement.findall("username") val='' for node in a_lst: val=node.attrib["usr"] print val threading.Thread(target=sdice(val,game,conn)).start()
def analyze(self, url, text=None):
    """Query the entity-extraction web service for *url* (optionally with
    raw *text*) and return a de-duplicated list of entity names.

    Entities scoring below ``self.relevance`` are dropped, names that are
    known aliases are skipped, and the canonical name is preferred when
    the service provides one.
    NOTE(review): Python 2 urllib API (urlencode/urlopen).
    """
    if text:
        args = {'uri': url, 'appId': self.api_key, 'text': text}
    else:
        args = {'uri': url, 'appId': self.api_key}
    args_enc = urllib.urlencode(args)
    output = urllib.urlopen(self.url, args_enc).read()
    results = []
    if output:
        dom = XML(output)
        aliases = []
        # Collect all alias names first so entity names can be checked
        # against them below.
        for alias in dom.findall('.//alias'):
            aliases.append(alias.text)
        for entity in dom.findall('.//entity'):
            name = entity.find('name')
            try:
                # Entities without a score attribute pass the filter.
                if float(entity.attrib['score']) < self.relevance:
                    continue
            except KeyError:
                pass
            if name != None:
                if name.text in aliases:
                    continue
                # Prefer the canonical name element when present.
                cname = entity.find('canonicalName')
                if cname != None:
                    name = cname
            if name == None:
                continue
            else:
                # De-duplicate by name text.
                if name.text in results:
                    continue
                results.append(name.text)
    return results
def _get_sys_info(self): """ Returns sysinfo of host system in the following format:: {'memory': [{'bank_locator': 'BANK 0', 'form_factor': 'SODIMM', 'locator': 'ChannelA-DIMM0', 'manufacturer': 'Samsung', 'part_number': 'M471B5273DH0-CK0', 'serial_number': '9760E90B', 'size': '4096 MB', 'speed': '1600 MHz', 'type': 'DDR3', 'type_detail': 'Synchronous'}, {'bank_locator': 'BANK 2', 'form_factor': 'SODIMM', 'locator': 'ChannelB-DIMM0', 'manufacturer': 'Micron', 'part_number': '16KTF51264HZ-1G6M1', 'serial_number': '3255C613', 'size': '4096 MB', 'speed': '1600 MHz', 'type': 'DDR3', 'type_detail': 'Synchronous'}], 'processor': {'external_clock': '100 MHz', 'family': 'Core i5', 'manufacturer': 'Intel(R) Corporation', 'max_speed': '2600 MHz', 'part_number': 'None', 'serial_number': 'None', 'signature': 'Type 0, Family 6, Model 58, Stepping 9', 'socket_destination': 'CPU Socket - U3E1', 'status': 'Populated, Enabled', 'type': 'Central Processor', 'version': 'Intel(R) Core(TM) i5-3320M CPU @ 2.60GHz'}, 'system': {'family': 'ThinkPad T430', 'manufacturer': 'LENOVO', 'product': '234455G', 'serial': 'PBKVYA6', 'sku': 'LENOVO_MT_2344', 'uuid': 'D6A27701-51F5-11CB-963F-F8A34AA11505', 'version': 'ThinkPad T430'}} """ xml = XML(self.conn.getSysinfo(0)) sysinfo = {} keys = ['system', 'processor'] for key in keys: sysinfo[key] = {} for element in xml.findall(key+'/entry'): sysinfo[key][element.attrib['name']] = element.text sysinfo['memory'] = [] for memorydevs in xml.findall('memory_device'): x = {} for entry in memorydevs.findall('entry'): x[entry.attrib['name']] = entry.text sysinfo['memory'].append(x) return sysinfo
def pubmed_count_coauthored_papers(author_1, author_2):
    """Count papers co-published by *author_1* and *author_2*.

    "Co-published" means PubMed articles where ``author_1`` is the first
    author and ``author_2`` the last author (compared by last name only).

    Args:
        author_1 (str): advisee author name, e.g. "Tripathy S".
        author_2 (str): adviser author name, e.g. "Urban N".

    Returns:
        int: number of matching PubMed articles.
    """
    advisee_author_str = author_1
    adviser_author_str = author_2
    esearchlink = 'http://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?'+\
        'db=pubmed&term=%s[Author] AND (%s[Author])'
    efetch = 'http://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?'+\
        '&db=pubmed&retmode=xml&id=%s'
    # Only last names are compared (PubMed names look like "Last FM").
    advisee_last_name = advisee_author_str.split()[0]
    adviser_last_name = adviser_author_str.split()[0]
    match_count = 0
    link = esearchlink % (advisee_author_str, adviser_author_str)
    # Percent-encode everything except URL-structural characters.
    linkCoded = quote(link, ':=/&()?_')
    req = Request(linkCoded)
    handle = urlopen(req)
    data = handle.read()
    xml = XML(data)
    matching_pmids = [xml_ob.text for xml_ob in xml.findall('.//Id')]
    for pmid in matching_pmids:
        link = efetch % (pmid)
        req = Request(link)
        handle = urlopen(req)
        data = handle.read()
        xml = XML(data)
        # Prefer validated author entries; fall back to all authors.
        authorList = xml.findall(".//Author[@ValidYN='Y']")
        if len(authorList) == 0:
            authorList = xml.findall(".//Author")
        if len(authorList) > 1:
            try:
                first_author_last_name = authorList[0].find("./LastName").text
                last_author_last_name = authorList[-1].find("./LastName").text
            except Exception:
                # Collective/corporate authors may lack a LastName; skip.
                continue
            if first_author_last_name == advisee_last_name and last_author_last_name == adviser_last_name:
                match_count += 1
    return match_count
def _list_alchemy_results(xml, relevance): dom = XML(xml) results = [] if dom.find("status").text == "OK": for concept in dom.findall(".//concept"): if float(concept.find("relevance").text) > relevance: results.append(concept.find("text").text) for kw in dom.findall(".//keyword"): if float(kw.find("relevance").text) > relevance: results.append(kw.find("text").text) return results
def _list_alchemy_results(xml,relevance): dom = XML(xml) results = [] if dom.find('status').text == 'OK': for concept in dom.findall('.//concept'): if float(concept.find('relevance').text) > relevance: results.append(concept.find('text').text) for kw in dom.findall('.//keyword'): if float(kw.find('relevance').text) > relevance: results.append(kw.find('text').text) return results
def test_myschema_xsd02(self):
    "With Document: check the generated my-schema XSD structure."
    body_map = {'user_id': 1,
                'title': '',
                'description': '',
                'linked_folder': '',
                'filedata': '',
               }
    backend = self._get_backend(DocumentFakeBackend, subject='create_doc',
                                body_map=body_map, model=Document
                               )
    builder = self._get_builder(backend)
    xsd = '{http://www.w3.org/2001/XMLSchema}'
    content = builder._render_myschema_xsd(self.request)
    xml = XML(content)
    # The target namespace must match the builder's, both as attribute
    # and as the xmlns:my declaration.
    self.assertEqual(builder.namespace, xml.get('targetNamespace'))
    self.assertEqual(builder.namespace,
                     re.search(r'xmlns:my="(?P<ns>[\w\d\-:/\.]*)"', content).groupdict()['ns']
                    )  # Can't be got with ElementTree, because it's a namespace
    # Every body_map field must be referenced in the root sequence.
    ref_attrs = {node.get('ref')
                 for node in xml.findall('{xsd}element/{xsd}complexType/{xsd}sequence/{xsd}element'.format(xsd=xsd))
                }
    expected_ref_attrs = {'my:{}'.format(key) for key in body_map}
    self.assertEqual(expected_ref_attrs, ref_attrs)
    # Expected attributes of every top-level <xsd:element>.
    xsd_elements = {
        'CremeCRMCrudity': {'name': 'CremeCRMCrudity'},
        'user_id': {'name': 'user_id', 'type': 'xsd:integer'},  # <xsd:element name="user_id" type="xsd:integer"/>
        'title': {'name': 'title', 'type': 'my:requiredString'},  # <xsd:element name="first_name" type="xsd:requiredString"/>
        # <xsd:element name="description">
        #   <xsd:complexType mixed="true">
        #     <xsd:sequence>
        #       <xsd:any minOccurs="0" maxOccurs="unbounded"
        #               namespace="http://www.w3.org/1999/xhtml" processContents="lax"/>
        #     </xsd:sequence>
        #   </xsd:complexType>
        # </xsd:element>
        'description': {'name': 'description'},
        'linked_folder': {'name': 'linked_folder', 'type': 'xsd:integer'},
        'filedata': {'name': 'filedata', 'type': 'my:requiredBase64Binary'},
    }
    for element_node in xml.findall('{}element'.format(xsd)):
        xsd_element_attrs = xsd_elements.get(element_node.get('name'))
        if xsd_element_attrs is None:
            self.fail('There is at least an extra node named: {}'.format(element_node.get('name')))
        # Attribute sets must match exactly — no extra or missing attrs.
        self.assertEqual(set(xsd_element_attrs.keys()), set(element_node.keys()))
        for attr in element_node.keys():
            self.assertEqual(xsd_element_attrs[attr],
                             element_node.get(attr))
def parse(self,url):
    """Parse a CTS GetCapabilities response into a TextInventory.

    Args:
        url: base URL of the CTS service; ``_get_capab_req`` is appended
            to build the capabilities request.

    Returns:
        TextInventory populated with textgroups, works and editions
        (possibly empty when the request or parse failed).
    """
    ti = TextInventory()
    logger.info("Parsing CTS service @ %s"%url)
    try:
        # TODO: separate this line in order to raise separate errors!
        xml_ti = XML(urllib2.urlopen(url+_get_capab_req).read())
        # retrieve and store the TI version
        for node in xml_ti.findall('.//%sTextInventory'%_cts_ns):
            ti.set_version(node.attrib.get('tiversion'))
            logger.info("TextInventory version: %s"%ti.get_version())
        # retrieve and store the textgroups
        for node in xml_ti.findall('.//%stextgroup'%_cts_ns):
            tg = TextGroup(id=node.attrib.get('projid'))
            tg.xml = ElementTree.tostring(node)
            ti.add('tg',tg)
            # retrieve and store the works
            for child in node:
                # parse groupname elem
                if(child.tag=="%s%s"%(_cts_ns,"groupname")):
                    tg.set_name(child.attrib.get("%slang"%_xml_ns),re.sub(r'\s\s+',"",child.text))
                    logger.debug("Found TextGroup: \"%s\""%tg.get_name(child.attrib.get("%slang"%_xml_ns)))
                #parse work elem
                elif(child.tag=="%s%s"%(_cts_ns,"work")):
                    w = Work(id=child.attrib.get('projid'),tg=tg.id)
                    for title in child.findall('.//%stitle'%_cts_ns):
                        w.set_title(title.text)
                    ti.add('wk',w)
                    logger.debug("Found Work: %s"%w.id)
                    # parse edition elem
                    # NOTE(review): the loops below rebind `node` and
                    # `child`, shadowing the outer loop variables; the
                    # outer iterators keep working, but the names are
                    # misleading — rename before any refactor.
                    for node in child.findall('.//%sedition'%_cts_ns):
                        e = Edition(id=node.attrib.get('projid'),work=w.id)
                        for child in node:
                            if(child.tag=="%s%s"%(_cts_ns,"label")):
                                e.label=child.text
                            elif(child.tag=="%s%s"%(_cts_ns,"description")):
                                lang=child.attrib.get("%slang"%_xml_ns)
                                desc=child.text
                                e.add_desc(lang,desc)
                            elif(child.tag=="%s%s"%(_cts_ns,"online")):
                                pass
                        # ti.add('ed',e)
    except ExpatError as error:
        logger.error("Parsing of %s failed with error \"%s\""%(url,str(error)))
    return ti
def check_existing_license(netconf_handler, licensetype='essential'):
    '''
    Check whether the running-config license boot level of the switch
    reached via <netconf_handler> matches <licensetype>.

    Returns True when a license/boot/level node with a child tag
    containing <licensetype> exists in the running config, else False.
    (NOTE(review): the previous docstring described interface
    configuration and did not match this function.)
    '''
    licenseVaildated = False
    # Kept only for the failure message below.
    output = (str(
        netconf_handler.get_config(source='running',
                                   filter=('xpath', "/native/license"))))
    # Re-fetch and round-trip through minidom to get UTF-8 bytes for
    # ElementTree.
    netconf_reply = xml.dom.minidom.parseString(
        str(
            netconf_handler.get_config(source='running',
                                       filter=('xpath', "/native/license"))))
    config = XML(netconf_reply.toxml("utf-8"))
    # Walk data/native/license/boot/level looking for the license type.
    for data in config.findall(
            '{urn:ietf:params:xml:ns:netconf:base:1.0}data'):
        for native in data.findall(
                '{http://cisco.com/ns/yang/Cisco-IOS-XE-native}native'):
            for license in native.findall(
                    '{http://cisco.com/ns/yang/Cisco-IOS-XE-native}license'):
                for boot in license.findall(
                        '{http://cisco.com/ns/yang/Cisco-IOS-XE-native}boot'):
                    for level in boot.findall(
                            '{http://cisco.com/ns/yang/Cisco-IOS-XE-native}level'
                    ):
                        for lictype in level:
                            if licensetype in str(lictype.tag):
                                licenseVaildated = True
    if licenseVaildated:
        return_val = True
    else:
        print("license not validated. Output %s" % output)
        return_val = False
    return return_val
def parse_kml(kmlstring):
    """Convert a KML document into a list of feed-style entry dicts.

    Each Placemark yields a dict with 'title', 'summary' (sanitised
    description elements), 'summary_detail', 'link' (Atom link href when
    present) and 'tags' (always None).
    """
    kmldom = XML(kmlstring)
    # The root tag is '{namespace}kml'; stripping the characters
    # 'k', 'm', 'l' from both ends leaves the '{namespace}' prefix.
    ns = kmldom.tag.strip('kml')
    entries = []
    for placemark in kmldom.findall('.//%sPlacemark' % ns):
        entry = {
            'title': '',
            'summary': '',
            'summary_detail': {'type': 'text/html'},
            'link': '',
            'tags': None,
        }
        names = placemark.findall(ns + 'name')
        entry['title'] = names[0].text if names else ''
        descriptions = placemark.findall(ns + 'description')
        entry['summary'] = sanitize_kml_description(descriptions)
        for link in placemark.findall('{http://www.w3.org/2005/Atom}link'):
            entry['link'] = link.attrib.get('href')
        entries.append(entry)
    return entries
def __init__(self, **kwargs):
    """Build a license object from XML data.

    Accepts either ``xml_node`` (an already-parsed element) or ``xml``
    (a raw XML string) in *kwargs*; copies known attributes off the
    node (missing ones become None) and collects child <feature>
    elements into ``self.features``.
    """
    self._issue_date_utc = None
    self._expiration_date_utc = None
    self.is_trial = "false"
    # Prefer a pre-parsed node; otherwise parse the raw XML string.
    node = kwargs["xml_node"] if "xml_node" in kwargs else XML(kwargs["xml"])
    attr_map = {
        "id": "licenseId",
        "user_id": "userId",
        "payment_plan_id": "paymentPlanId",
        "issue_date_utc": "issueDateUTC",
        "expiration_date_utc": "expirationDateUTC",
        "is_trial": "is_trial",
    }
    node_attrs = node.attrib
    for attr_name, xml_name in attr_map.items():
        # Attributes absent from the XML are explicitly set to None.
        setattr(self, attr_name, node_attrs.get(xml_name))
    self.features = [Feature(xml_node=f) for f in node.findall(".//feature")]
    super().__init__(**kwargs)
def _parseIssueFeed(self, value, rd):
    """Parse an issue-tracker RSS feed and fire the deferred *rd*.

    Args:
        value: raw RSS/XML content of the feed.
        rd: deferred; called back with a list of issue dicts on success,
            errbacked (with the current parse failure) on bad XML.
    """
    try:
        feed = XML(value)
    except:
        # Malformed feed: hand the in-flight exception to the deferred.
        rd.errback()
        return
    data = []
    for item in feed.findall('channel/item'):
        # Feed dates look like 'Mon, 01 Jan 2018 12:00:00 +0000'.
        timeStruct = time.strptime(item.find('updated').text,
                                   '%a, %d %b %Y %H:%M:%S +0000')
        # Component is optional; substitute a placeholder when absent.
        component = item.find('component')
        if component is None:
            component = 'n/a'
        else:
            component = component.text
        data.append({
            'key': item.find('key').text,
            'link': item.find('link').text,
            'summary': item.find('summary').text,
            'type': item.find('type').text,
            'status': item.find('status').text,
            'component': component,
            'updated': timeStruct,
        })
    rd.callback(data)
def verify_optics_configuration(netconf_handler, main_interface):
    """Check that breakout interface '<main_interface>/2' is present in
    the device's interface operational data.

    Args:
        netconf_handler: ncclient-style manager whose ``get`` returns the
            NETCONF reply for the given subtree filter.
        main_interface: base interface name, e.g. 'TwentyFiveGigE1/0/1'.

    Returns:
        bool: True when the breakout interface is reported, else False.
    """
    payload = '''
        <filter>
            <interfaces xmlns="http://cisco.com/ns/yang/Cisco-IOS-XE-interfaces-oper">
                <interface>
                    <name>{itf}</name>
                </interface>
            </interfaces>
        </filter>
    '''
    breakout_interface = main_interface + "/2"
    netconf_reply = xml.dom.minidom.parseString(
        str(netconf_handler.get(payload.format(itf=breakout_interface))))
    oper_data = XML(netconf_reply.toxml("utf-8"))
    # BUG FIX: the original left return_val unbound (NameError at the
    # final return) when no interface matched, and wrapped the string
    # comparison in a pointless try/except. Default to False and flip to
    # True on a match.
    return_val = False
    for data in oper_data.findall('{urn:ietf:params:xml:ns:netconf:base:1.0}data'):
        for element in data.findall('{http://cisco.com/ns/yang/Cisco-IOS-XE-interfaces-oper}interfaces'):
            for intf in element.findall('{http://cisco.com/ns/yang/Cisco-IOS-XE-interfaces-oper}interface'):
                itf_name = intf.find('{http://cisco.com/ns/yang/Cisco-IOS-XE-interfaces-oper}name').text
                print(itf_name)
                if itf_name == breakout_interface:
                    return_val = True
    return return_val
def load(text, match=None):
    """Read an Atom Feed XML string into native Python data.

    :param text: The XML text to load (may be ``None`` or blank).
    :type text: ``string``
    :param match: A tag name or path to match (optional); when given,
        only the matching sub-elements are loaded.
    :type match: ``string``
    :return: ``None`` when there is nothing to load, the converted
        element when exactly one matches, otherwise a list of them.
    """
    if text is None:
        return None
    text = text.strip()
    if not text:
        return None
    nametable = {'namespaces': [], 'names': {}}
    root = XML(text)
    if match is None:
        items = [root]
    else:
        items = root.findall(match)
    if not items:
        return None
    if len(items) == 1:
        return load_root(items[0], nametable)
    return [load_root(item, nametable) for item in items]
def updated(downloader, path, _):
    """Callback invoked with the downloaded Internet Archive file list.

    Parses the XML list at *path*, picks the first file whose format maps
    to the requested ``content_type`` (closure variable), and schedules
    its download via ``download_cb``; logs and returns on failure.
    """
    if path is None:
        logging.error('internet archive file list get fail')
        # FIXME: report to user a failure to download
        return
    from xml.etree.ElementTree import XML
    # BUG FIX: close the file handle instead of leaking it
    # (the original did XML(open(path).read()) and never closed).
    with open(path, 'r') as file_list:
        xml = XML(file_list.read())
    os.remove(path)
    # Archive 'format' labels -> internal content types.
    table = {
        'text pdf': u'application/pdf',
        'grayscale luratech pdf': u'application/pdf-bw',
        'image container pdf': u'application/pdf',
        'djvu': u'image/x.djvu',
        'epub': u'application/epub+zip',
    }
    chosen = None
    for element in xml.findall('file'):
        fmt = element.find('format').text.lower()
        # First known format matching the wanted content type wins.
        if fmt in table and table[fmt] == content_type:
            chosen = element.get('name')
            break
    if chosen is None:
        logging.error('internet archive file list omits content type')
        # FIXME: report to user a failure to find matching content
        return
    url = os.path.join(url_base, chosen)
    GObject.idle_add(download_cb, url)
def __init__(self, args):
    """Fetch the CI job's XML description and resolve artifact URLs.

    Args:
        args: parsed options; ``args.build`` is the job's base URL.

    Raises:
        Stop: when the job page does not return HTTP 200.
        AttributeError: when the build lists no artifacts.
    """
    self.args = args
    url = opener.open(args.build+"api/xml")
    try:
        log.debug('Fetching xml from %s code:%d', url.url, url.code)
        if url.code != 200:
            log.error('Failed to get CI XML from %s (code %d)',
                      url.url, url.code)
            raise Stop(20, 'Job lookup failed, is the job name correct?')
        ci_xml = url.read()
    finally:
        # Always release the HTTP connection, even on failure.
        url.close()
    root = XML(ci_xml)
    artifacts = root.findall("./artifact")
    base_url = args.build+"artifact/"
    if len(artifacts) <= 0:
        raise AttributeError(
            "No artifacts, please check build on the CI server.")
    patterns = self.get_artifacts_list()
    for artifact in artifacts:
        filename = artifact.find("fileName").text
        # Expose each matching artifact's URL as an attribute named after
        # its pattern key.
        for key, value in patterns.iteritems():
            if re.compile(value).match(filename):
                rel_path = base_url + artifact.find("relativePath").text
                setattr(self, key, rel_path)
        pass
def gsversion(self):
    '''Obtain the GeoServer version, or just "2.2.x" if < 2.3.x
    (older servers have no version endpoint). The result is cached
    on the instance.

    Raises:
        FailedRequestError: If the request fails.
    '''
    if self._version:
        return self._version
    about_url = self.service_url + "/about/version.xml"
    response, content = self.http.request(about_url, "GET")
    version = None
    if response.status == 200:
        dom = XML(content)
        for resource in dom.findall("resource"):
            if resource.attrib["name"] == "GeoServer":
                try:
                    version = resource.find("Version").text
                    break
                # BUG FIX: catch only AttributeError (missing <Version>
                # child -> .text on None) instead of a bare except that
                # swallowed everything, including KeyboardInterrupt.
                except AttributeError:
                    pass
    # This will raise an exception if the catalog is not available.
    # If the catalog is available but could not return version information,
    # it is an old version that does not support that.
    if version is None:
        self.get_workspaces()  # just to inform that version < 2.3.x
        version = "2.2.x"
    self._version = version
    return version
def test_manifest_xsf_02(self):
    "Test Image fk field."
    body_map = {'user_id': 1, 'image': ''}
    backend = self._get_backend(
        ContactFakeBackend,
        subject='create_contact',
        body_map=body_map,
        model=Contact,
    )
    builder = self._get_builder(backend)
    content = builder._render_manifest_xsf(self.request)
    # The declared xmlns:my namespace must match the builder's.
    self.assertEqual(re.search(r'xmlns:my="(?P<ns>[\w\d\-:/\.]*)"', content)['ns'],
                     builder.get_namespace()
                    )  # Can't be got with ElementTree, because it's a namespace
    xsf = '{http://schemas.microsoft.com/office/infopath/2003/solutionDefinition}'
    xml = XML(content)
    # The image field must be exposed through an xmlToEdit view node.
    xmlToEdit_node = xml.find(f'{xsf}views/{xsf}view/{xsf}editing/{xsf}xmlToEdit')
    self.assertIsNotNone(xmlToEdit_node)
    self.assertEqual('image', xmlToEdit_node.get('name'))
    self.assertEqual('/my:CremeCRMCrudity/my:image', xmlToEdit_node.get('item'))
    # Every menu-area button must target the image field.
    button_nodes = xml.findall(f'{xsf}views/{xsf}view/{xsf}menuArea/{xsf}button')
    self.assertTrue(button_nodes)
    self.assertSetEqual(
        {'image'},
        {button_node.get('xmlToEdit') for button_node in button_nodes}
    )
def findSha1Artifacts(self, strFileName, strGroupID, strArtifactID):
    """Find all versions of an artifact whose SHA1 matches *strFileName*.

    Queries the repository's Lucene SHA1 search and keeps only hits whose
    group and artifact ids match the requested ones.

    Returns:
        sorted (ascending) list of deploy_version.version objects;
        'SNAPSHOT' maps to version 0.0.0.

    Raises:
        Exception: when the server reports a truncated search result.
    """
    atVersions = []
    # Generate the SHA1 sum for the file.
    strFileSha1 = self.generate_sha1_from_file(strFileName)
    strPath = self.strUrlLuceneSearchSha1 % strFileSha1
    aucContent = self.tRestDriver.get(self.tServerBaseUrl, strPath)
    tSearchResult = XML(aucContent)
    # The search result must be complete.
    if tSearchResult.findtext('tooManyResults') != 'false':
        raise Exception("Received a truncated search result!")
    # Loop over all results.
    for tNode in tSearchResult.findall('data/artifact'):
        strG = tNode.findtext('groupId')
        strA = tNode.findtext('artifactId')
        strVersion = tNode.findtext('version')
        # IDIOM FIX: dropped the redundant '== True' comparisons around
        # isinstance().
        if isinstance(strG, basestring) and isinstance(strA, basestring) \
                and isinstance(strVersion, basestring):
            strG = strG.strip()
            strA = strA.strip()
            strVersion = strVersion.strip()
            if strGroupID == strG and strArtifactID == strA:
                if strVersion == 'SNAPSHOT':
                    tVersion = deploy_version.version(0, 0, 0)
                else:
                    tVersion = deploy_version.version(strVersion)
                atVersions.append(tVersion)
    atVersions.sort()
    return atVersions
def get_pubmed_id_from_doi(doi):
    """Look up PubMed IDs for a DOI via the NCBI esearch service.

    Args:
        doi (str): article DOI; searched in the [aid] (article id) field.

    Returns:
        list[str]: matching PubMed ID strings (typically 0 or 1 entries).
    """
    searchLink = "http://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?db=pubmed&term=%s[aid]" % (doi)
    handle = urlopen(searchLink)
    data = handle.read()
    xml = XML(data)  # convert to an xml object so we can apply x-path search fxns to it
    pmidList = [x.text for x in xml.findall(".//Id")]  # find xml "Id" elements
    return pmidList
def do_dg_wwn_map(self):
    """Build and persist the local-to-remote WWN mapping for the device group.

    Reads device names from the DG file, asks symdev for their verbose
    XML description, pairs each device's local (Product) WWN with its
    RDF remote WWN, and writes the pairs to ``self.wwn_map_fpath`` as JSON.

    Raises:
        ex.Error: when the symdev command fails.
    """
    devs = []
    with open(self.dgfile_local_name, "r") as filep:
        for line in filep.readlines():
            # Only 'DEV...' lines describe devices; field 2 is the name.
            if "DEV" not in line:
                continue
            devs.append(line.split()[1])
    cmd = ["/usr/symcli/bin/symdev", "list", "-output", "xml_e",
           "-sid", self.symid, "-devs", ",".join(devs), "-v"]
    ret, out, err = self.call(cmd)
    if ret != 0:
        raise ex.Error("Failed to run command %s" % ' '.join(cmd))
    tree = XML(out)
    mapping = []
    for dev in tree.findall("Symmetrix/Device"):
        try:
            local = dev.find('Product/wwn').text
            remote = dev.find('RDF/Remote/wwn').text
        except Exception as exc:
            # Devices without RDF/WWN data are logged and skipped.
            self.log.warning(str(exc))
        else:
            mapping.append((local, remote))
    with open(self.wwn_map_fpath, 'w') as filep:
        json.dump(mapping, filep)
        filep.write("\n")
def print_usa(url): data = get_data(url) dy = XML(data) for cd in dy.findall("./CD"): if cd.find("./YEAR").text == "1988": print cd.find("./TITLE").text
def download(job, regex):
    """Grabs platform specific distribution targets from Hudson.

    Args:
        job: Hudson job name.
        regex: compiled pattern; only artifacts whose fileName matches
            are downloaded into the current directory.

    Returns:
        [revision, list of downloaded filenames]. Exits the process when
        the job is currently building.
    """
    url = urllib.urlopen("/".join([HUDSON_ROOT, job, HUDSON_XML_SUFFIX]))
    hudson_xml = url.read()
    # Rewrite 'origin/<branch>' to 'origin_<branch>' — presumably because
    # the slash breaks downstream path handling; TODO confirm.
    hudson_xml = hudson_xml.replace('origin/', 'origin_')
    url.close()
    root = XML(hudson_xml)
    building = root.findtext("./building")
    if building == 'true':
        print '%s build in progress, exiting...' % job
        sys.exit(1)
    revision = root.findtext("./changeSet/revision/revision")
    artifacts = root.findall("./artifact")
    print "Retrieving %s job artifacts from revision: %s" % (job, revision)
    base_url = "/".join([HUDSON_ROOT, job, 'lastSuccessfulBuild/artifact'])
    new_artifacts = list()
    for artifact in artifacts:
        filename = artifact.findtext("fileName")
        # Skip artifacts the caller is not interested in.
        if not regex.match(filename):
            continue
        artifact_url = "/".join([base_url, artifact.findtext("relativePath")])
        print "Downloading %s from URL %s" % (filename, artifact_url)
        urllib.urlretrieve(artifact_url , filename)
        new_artifacts.append(filename)
    return [revision, new_artifacts]
def get_version(self):
    '''Obtain the version or just 2.2.x if < 2.3.x; the value is cached
    on the instance after the first lookup.

    Raises:
        FailedRequestError: If the request fails.
    '''
    if self._version:
        return self._version
    resp = self.http_request("{}/about/version.xml".format(self.service_url))
    version = None
    if resp.status_code == 200:
        dom = XML(resp.content)
        for resource in dom.findall("resource"):
            if resource.attrib["name"] != "GeoServer":
                continue
            try:
                version = resource.find("Version").text
                break
            except AttributeError:
                # Resource without a <Version> child: keep looking.
                pass
    # This will raise an exception if the catalog is not available.
    # If the catalog is available but could not return version information,
    # it is an old version that does not support that.
    if version is None:
        version = "2.2.x"  # just to inform that version < 2.3.x
    self._version = version
    return version
def _process_tokenizer(self, tokenizer): # tokenizer_tokens = [token for token in tokenizer] # no_marks = map(lambda s: s.strip('\'').strip(), tokenizer_tokens) no_marks = map(lambda s: s.strip("'").strip(), tokenizer) token_tree = XML(''.join(no_marks)) token_iter = token_tree.findall('.//') return (token for token in token_iter)
def getAllArtifactVersions(self, strGroupID, strArtifactID):
    """Return every version of *strGroupID*:*strArtifactID* found by the
    repository's Lucene GA search.

    Returns:
        sorted (ascending) list of deploy_version.version objects;
        'SNAPSHOT' maps to version 0.0.0.

    Raises:
        Exception: when the server reports a truncated search result.
    """
    atVersions = []
    strPath = self.strUrlLuceneSearchGA % (strGroupID, strArtifactID)
    aucContent = self.tRestDriver.get(self.tServerBaseUrl, strPath)
    tSearchResult = XML(aucContent)
    # The search result must be complete.
    if tSearchResult.findtext('tooManyResults') != 'false':
        raise Exception("Received a truncated search result!")
    # Loop over all results.
    for tNode in tSearchResult.findall('data/artifact'):
        strVersion = tNode.findtext('version')
        # Hits without a version string are silently skipped.
        if isinstance(strVersion, basestring) == True:
            strVersion = strVersion.strip()
            if strVersion == 'SNAPSHOT':
                tVersion = deploy_version.version(0, 0, 0)
            else:
                tVersion = deploy_version.version(strVersion)
            atVersions.append(tVersion)
    # Sort the versions.
    atVersions.sort()
    return atVersions
def identify(self, geosGeometry, geometryFieldName, layers, url, username, password): """ Assuming : Url like http://localhost:8080/geoserver/wfs layers like geonode:capa geosGeometry is in Lambert72 """ #TODO input checking gmlString = geosGeometry.ogr.gml payload = self._buildWfsIntersectRequest(gmlString, layers, geometryFieldName) #Verify False to avoid certificate not trusted problems r = requests.post(url, data = payload, auth=(username, password), verify=False) tree = XML(r.text) if tree.tag == "{http://www.opengis.net/ogc}ServiceExceptionReport": #We Got OGC Error. Find the root cause and throw a proper Exception se = tree.find('{http://www.opengis.net/ogc}ServiceException') raise Exception(str(se.text).strip()) else: clean_results = [] features = tree.findall('{http://www.opengis.net/gml}featureMember') for feature in features: attributes = {} for child in feature: for child_elem in child: tag_name = child_elem.tag.split('}')[-1] #Get rid of namespace if child_elem.text is not None: attributes[tag_name] = child_elem.text else: attributes[tag_name] = "" clean_results.append(attributes) return clean_results
def getAllArtifactVersions(self, strGroupID, strArtifactID):
    """Return every version of *strGroupID*:*strArtifactID* found by the
    repository's Lucene GA search.

    Returns:
        sorted (ascending) list of deploy_version.version objects;
        'SNAPSHOT' maps to version 0.0.0.

    Raises:
        Exception: when the server reports a truncated search result.
    """
    atVersions = []
    strPath = self.strUrlLuceneSearchGA % (strGroupID, strArtifactID)
    aucContent = self.tRestDriver.get(self.tServerBaseUrl, strPath)
    tSearchResult = XML(aucContent)
    # The search result must be complete.
    if tSearchResult.findtext('tooManyResults') != 'false':
        raise Exception("Received a truncated search result!")
    # Loop over all results.
    for tNode in tSearchResult.findall('data/artifact'):
        strVersion = tNode.findtext('version')
        # IDIOM FIX: 'isinstance(...) == True' -> plain isinstance(...).
        if isinstance(strVersion, basestring):
            strVersion = strVersion.strip()
            if strVersion == 'SNAPSHOT':
                tVersion = deploy_version.version(0, 0, 0)
            else:
                tVersion = deploy_version.version(strVersion)
            atVersions.append(tVersion)
    # Sort the versions.
    atVersions.sort()
    return atVersions
def print_usa_cds(url):
    """Print the TITLE of each CD whose COUNTRY element equals 'USA'."""
    catalog = XML(get_data(url))
    usa_titles = [
        disc.find("./TITLE").text
        for disc in catalog.findall("./CD")
        if disc.find("./COUNTRY").text == "USA"
    ]
    for title in usa_titles:
        print(title)
def verify_app_status(netconf_connection, appplication_name, status="deployed"):
    '''
    Verify that the named application is in the given state (default
    "deployed"). In order to change CPU and memory resources, the
    application needs to be in the deployed state.

    Returns:
        bool: True when the reply contains <ok/> and the application's
        reported state matches *status* (case-insensitive); else False.
    '''
    oper_payload = '''
        <filter>
            <virtual-services xmlns="http://cisco.com/ns/yang/Cisco-IOS-XE-virtual-service-oper">
                <virtual-service>
                    <name>{application}</name>
                    <details>
                        <state/>
                    </details>
                </virtual-service>
            </virtual-services>
        </filter>
    '''
    netconf_reply = xml.dom.minidom.parseString(
        str(netconf_connection.get(oper_payload.format(application=appplication_name))))
    print(netconf_reply.toprettyxml(indent=" "))
    if "<ok/>" not in netconf_reply.toprettyxml(indent=" "):
        return False
    oper_data = XML(netconf_reply.toxml("utf-8"))
    # BUG FIX: the original dropped the ':' after the status comparison
    # (SyntaxError) and left return_val unbound when no service matched;
    # default to False and record the last service's comparison result.
    return_val = False
    for data in oper_data.findall('{urn:ietf:params:xml:ns:netconf:base:1.0}data'):
        for element in data.findall('{http://cisco.com/ns/yang/Cisco-IOS-XE-virtual-service-oper}virtual-services'):
            for service in element.findall('{http://cisco.com/ns/yang/Cisco-IOS-XE-virtual-service-oper}virtual-service'):
                app_name = service.find('{http://cisco.com/ns/yang/Cisco-IOS-XE-virtual-service-oper}name').text
                for detail in service.findall('{http://cisco.com/ns/yang/Cisco-IOS-XE-virtual-service-oper}details'):
                    app_status = detail.find('{http://cisco.com/ns/yang/Cisco-IOS-XE-virtual-service-oper}state').text
                    return_val = app_status.upper() == status.upper()
    return return_val
def get_version(self):
    '''Obtain the version or just 2.2.x if < 2.3.x; the result is
    cached on the instance after the first lookup.

    Raises:
        FailedRequestError: If the request fails.
    '''
    if self._version:
        return self._version
    resp = self.http_request("{}/about/version.xml".format(self.service_url))
    version = None
    if resp.status_code == 200:
        body = resp.content
        # ElementTree wants text here; decode raw response bytes first.
        if isinstance(body, bytes):
            body = body.decode('UTF-8')
        dom = XML(body)
        for resource in dom.findall("resource"):
            if resource.attrib["name"] != "GeoServer":
                continue
            try:
                version = resource.find("Version").text
                break
            except AttributeError:
                # Resource without a <Version> child: keep looking.
                pass
    # This will raise an exception if the catalog is not available.
    # If the catalog is available but could not return version information,
    # it is an old version that does not support that.
    if version is None:
        version = "2.2.x"  # just to inform that version < 2.3.x
    self._version = version
    return version
def get_used_subid(netconf_handler):
    '''
    Return a telemetry subscription id already configured on the switch
    behind <netconf_handler>.

    Walks the mdt-config-data subscriptions in the running config and
    returns the last subscription-id found, or 1 when none exist.
    (NOTE(review): the previous docstring described 2-event
    classification configuration and did not match this function.)
    '''
    netconf_reply = xml.dom.minidom.parseString(
        str(
            netconf_handler.get_config(source='running',
                                       filter=('xpath', "/mdt-config-data"))))
    config = XML(netconf_reply.toxml("utf-8"))
    # Default when no subscription is configured.
    subscription_id = 1
    for data in config.findall(
            '{urn:ietf:params:xml:ns:netconf:base:1.0}data'):
        for configdata in data.findall(
                '{http://cisco.com/ns/yang/Cisco-IOS-XE-mdt-cfg}mdt-config-data'
        ):
            for subscriptions in configdata.findall(
                    '{http://cisco.com/ns/yang/Cisco-IOS-XE-mdt-cfg}mdt-subscription'
            ):
                # Each iteration overwrites the previous value, so the
                # last configured subscription id wins.
                subscription_id = int(
                    subscriptions.find(
                        '{http://cisco.com/ns/yang/Cisco-IOS-XE-mdt-cfg}subscription-id'
                    ).text)
    return subscription_id
def fetch_snp_records(ids):
    """Fetch dbSNP records for the given ids via Entrez efetch.

    Args:
        ids: iterable of dbSNP id strings (joined with commas for the
            single efetch request).

    Returns:
        map of dicts with remapped keys, one per Rs element in the reply.
    """
    # 2010-07-12 11:57 Reece Hart <*****@*****.**> Most (all other?)
    # Entrez facilities use DTDs. dbSNP uses XSD (with namespaces), which
    # isn't supported by Entrez.read. Use xml.elementtree instead.
    xml = Entrez.efetch(db='snp',id=','.join(ids), retmode='xml').read()
    d = XML(xml)
    return map(_remap_dict_keys, map(
        _rs_elem_as_dict,
        d.findall('{http://www.ncbi.nlm.nih.gov/SNP/docsum}Rs')))
def sanitize_kml(kmlstring):
    """Sanitise the first description of each Placemark in a KML string.

    Returns the modified document re-serialised as UTF-8.
    """
    kmldom = XML(kmlstring)
    # Root tag is '{namespace}kml'; stripping the characters 'k','m','l'
    # from both ends leaves just the '{namespace}' prefix.
    ns = kmldom.tag.strip('kml')
    placemarks = kmldom.findall('.//%sPlacemark' % ns)
    for placemark in placemarks:
        summary = placemark.findall('%sdescription' % ns)
        # NOTE(review): raises IndexError for a Placemark with no
        # description element — confirm that can't occur upstream.
        summary[0].text = sanitize_kml_description(summary)
    return tostring(kmldom, 'utf-8')
def get_pubmed_id_from_title(titleEncoded):
    """Search PubMed for an article title via the NCBI esearch service.

    Args:
        titleEncoded (str): article title; it is wrapped in parentheses
            and percent-encoded (parentheses preserved) for the query.

    Returns:
        list[str]: PubMed ID strings of matching articles.
    """
    queryStrQuoted = quote("(%s)" % titleEncoded, '()')
    searchLink = 'http://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?db=pubmed&term=%s' % (queryStrQuoted)
    handle = urlopen(searchLink)
    data = handle.read()
    xml = XML(data)  # convert to an xml object so we can apply x-path search fxns to it
    pmidList = [x.text for x in xml.findall('.//Id')]  # find xml "Id" elements
    return pmidList
def verify_sw_version(netconf_handler, min_major_release=16, min_minor_release=9, min_version=1):
    '''
    Verify that the software version of the switch behind
    <netconf_handler> is greater than or equal to
    <min_major_release>.<min_minor_release>.<min_version>.

    Returns True if the running version satisfies the minimum, else False.
    '''
    payload = '''
        <filter>
            <device-hardware-data xmlns="http://cisco.com/ns/yang/Cisco-IOS-XE-device-hardware-oper">
                <device-hardware>
                    <device-system-data>
                        <boot-time/>
                        <software-version/>
                    </device-system-data>
                </device-hardware>
            </device-hardware-data>
        </filter>
    '''
    netconf_reply = xml.dom.minidom.parseString(
        str(netconf_handler.get(payload)))
    oper_data = XML(netconf_reply.toxml("utf-8"))
    print(netconf_reply.toprettyxml(indent=" "))
    min_release = (int(min_major_release), int(min_minor_release), int(min_version))
    # Default False also fixes the original's unbound return_val when the
    # reply contained no system data.
    return_val = False
    for data in oper_data.findall(
            '{urn:ietf:params:xml:ns:netconf:base:1.0}data'):
        for element in data.findall(
                '{http://cisco.com/ns/yang/Cisco-IOS-XE-device-hardware-oper}device-hardware-data'
        ):
            for device in element.findall(
                    '{http://cisco.com/ns/yang/Cisco-IOS-XE-device-hardware-oper}device-hardware'
            ):
                for sysdata in device.findall(
                        '{http://cisco.com/ns/yang/Cisco-IOS-XE-device-hardware-oper}device-system-data'
                ):
                    sw_version = sysdata.find(
                        '{http://cisco.com/ns/yang/Cisco-IOS-XE-device-hardware-oper}software-version'
                    ).text
                    print(sw_version)
                    release = re.findall(r'^.*Version (\d+)\.(\d+)\.(\d+)',
                                         sw_version, re.MULTILINE)
                    # BUG FIX: compare the version as a tuple. The original
                    # compared each component independently, so e.g. 17.1.1
                    # failed a 16.9.1 minimum because 1 < 9.
                    running = tuple(int(part) for part in release[0])
                    if running >= min_release:
                        return_val = True
                    else:
                        print(
                            "Sofware version currently running: %s minimum expected release %s.%s.%s"
                            % (release, min_major_release, min_minor_release, min_version))
                        return_val = False
    return return_val
def get_licenses(self, feature_ids=None, payment_plan_ids=None):
    """Fetch license certificates, optionally filtered.

    Args:
        feature_ids: optional list of feature ids to filter by.
        payment_plan_ids: optional list of payment plan ids to filter by.

    Returns:
        list of License objects parsed from the response XML.
    """
    # BUG FIX: mutable default arguments ([]) are shared across calls;
    # use None sentinels and substitute fresh lists.
    if feature_ids is None:
        feature_ids = []
    if payment_plan_ids is None:
        payment_plan_ids = []
    licenses = []
    res = Base.api.do_request('GET', self.build_url('/licenses'), {
        'featureIds': feature_ids,
        'paymentPlanIds': payment_plan_ids
    })
    body = str(res['body'], 'utf-8').replace("\n", "")
    doc = XML(body)
    for lnode in doc.findall('.//licenseCertificate'):
        licenses.append(License(xml_node=lnode))
    return licenses
def getLastCommiter(file):
    """Return (author, msg, date) of the most recent svn commit for *file*.

    Returns None implicitly when the log has no entries.
    NOTE(review): *file* is interpolated into a shell command unquoted --
    a path containing shell metacharacters will break the command (or
    execute arbitrary shell); confirm callers only pass trusted paths.
    """
    cmd = 'svn log --xml -l 1 %s'
    output = os.popen(cmd % file)
    try:
        content = output.read()
    finally:
        # Close the pipe even if read() raises, so the fd is not leaked.
        output.close()
    root = XML(content)
    for item in root.findall('logentry'):
        return item.find('author').text, item.find('msg').text, item.find('date').text
def parse_cmml(cmml):
    """Parse CMML markup and return one dict per <clip> element.

    Each dict carries 'timestamp' (converted from the clip's NPT start),
    'url' (currently always empty) and 'text' (the clip title).
    """
    entries = []
    for clip in XML(cmml).findall('clip'):
        entries.append({
            'timestamp': _npt_to_timestamp(clip.attrib['start']),
            'url': '',  # TODO: this
            'text': clip.attrib['title'],
        })
    return entries
def __init__(self, build=BUILD):
    """Scrape a Hudson build's artifact list and bucket download URLs.

    Populates self.server / self.source with single URLs and
    self.win / self.mac / self.linux with lists of installer URLs.

    :param build: base URL of the Hudson build (its "api/xml" is fetched)
    :raises AttributeError: when the build exposes no artifacts
    """
    self.server = None
    # Fix: self.source was never pre-initialized, so builds without an
    # OMERO.source artifact left the attribute missing entirely.
    self.source = None
    self.win = list()
    self.mac = list()
    self.linux = list()
    url = urllib.urlopen(build + "api/xml")
    try:
        hudson_xml = url.read()
    finally:
        url.close()
    root = XML(hudson_xml)
    artifacts = root.findall("./artifact")
    base_url = build + "artifact/"
    if len(artifacts) <= 0:
        raise AttributeError("No artifacts, please check build on Hudson.")
    # Compile the classification patterns once instead of per artifact.
    win_re = re.compile(r'.*win.zip')
    mac_re = re.compile(r'.*mac.zip')
    osx_re = re.compile(r'.*OSX.zip')
    linux_re = re.compile(r'.*b\d+.zip')
    for artifact in artifacts:
        filename = artifact.find("fileName").text
        artifact_url = base_url + artifact.find("relativePath").text
        if filename.startswith("OMERO.server"):
            self.server = artifact_url
        elif filename.startswith('OMERO.source'):
            self.source = artifact_url
        elif filename.startswith(('OMERO.imagej', 'OMERO.java',
                                  'OMERO.matlab', 'OMERO.py')):
            # Intentionally ignored artifact families. (The original also
            # listed OMERO.server here, which was unreachable: the first
            # branch already consumes it.)
            pass
        elif filename.startswith("OMERO.importer"):
            # Importer artifacts that are neither the win nor the mac zip
            # are treated as linux downloads.
            if not win_re.match(filename) and not mac_re.match(filename):
                self.linux.append(artifact_url)
        else:
            if win_re.match(filename):
                self.win.append(artifact_url)
            if osx_re.match(filename):
                self.mac.append(artifact_url)
            if mac_re.match(filename):
                self.mac.append(artifact_url)
            if linux_re.match(filename):
                self.linux.append(artifact_url)
def __parse_xml(self, xml):
    """Rebuild the private key->name map from <node key="N" id="Name"> elements
    and remember the raw xml string."""
    self.__name_map.clear()
    tree = XML(xml)
    for node in tree.findall(".//node"):
        self.__name_map[int(node.get("key"))] = node.get("id")
    self.__xml = xml
def parse_name_map(xml_node_list):
    """Build a {key: id} mapping from <node key="N" id="Name"> elements."""
    tree = XML(xml_node_list)
    return {
        int(node.get("key")): node.get("id")
        for node in tree.findall(".//node")
    }
def get_profile_name(p):
    """
    Takes a path object to the config xml and returns the value of the
    omero.config.profile property inside the __ACTIVE__ properties block,
    or None when no such property exists.
    """
    xml = XML(p.text())
    for props in xml.findall("./properties"):
        if props.attrib["id"] != "__ACTIVE__":
            continue
        # Iterate the element directly: Element.getchildren() was
        # deprecated and removed in Python 3.9.
        for prop in props:
            if prop.attrib["name"] == "omero.config.profile":
                return prop.attrib["value"]
def get_pmid_from_str(in_str):
    """Search PubMed for *in_str* and return a list with at most one PMID string.

    Best-effort: any network or parse failure yields an empty list.
    """
    search_str = quote_plus(in_str)
    searchLink = 'http://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?db=pubmed&term=%s' % (search_str)
    try:
        handle = urlopen(searchLink)
        data = handle.read()
        xml = XML(data)  # convert to an xml object so we can apply x-path search fxns to it
        pmidList = [x.text for x in xml.findall('.//Id')]  # find xml "Id" elements
        if len(pmidList) > 1:
            # Keep only the best (first) hit when the search is ambiguous.
            pmidList = [pmidList[0]]
    except Exception:
        # Deliberate best-effort swallow: an unreachable/malformed E-utils
        # response simply means "no PMID found".
        pmidList = []
    # Fix: the original computed pmidList but never returned it (always None).
    return pmidList
def __init__(self, build=BUILD):
    """Read a Hudson build's artifact manifest and sort artifact URLs by kind.

    Sets self.server and self.source to single download URLs, and fills
    self.win / self.mac / self.linux with per-platform installer URLs.

    :param build: base URL of the Hudson build; "api/xml" is appended
    :raises AttributeError: if the build lists no artifacts
    """
    self.server = None
    # Fix: self.source must exist even when no OMERO.source artifact is
    # present; previously it was only created inside the loop.
    self.source = None
    self.win = list()
    self.mac = list()
    self.linux = list()
    url = urllib.urlopen(build + "api/xml")
    try:
        hudson_xml = url.read()
    finally:
        url.close()
    root = XML(hudson_xml)
    artifacts = root.findall("./artifact")
    base_url = build + "artifact/"
    if len(artifacts) <= 0:
        raise AttributeError("No artifacts, please check build on Hudson.")
    # Patterns are loop-invariant; compile them once.
    win_pat = re.compile(r".*win.zip")
    mac_pat = re.compile(r".*mac.zip")
    osx_pat = re.compile(r".*OSX.zip")
    linux_pat = re.compile(r".*b\d+.zip")
    for artifact in artifacts:
        filename = artifact.find("fileName").text
        link = base_url + artifact.find("relativePath").text
        if filename.startswith("OMERO.server"):
            self.server = link
        elif filename.startswith("OMERO.source"):
            self.source = link
        elif filename.startswith(
            ("OMERO.imagej", "OMERO.java", "OMERO.matlab", "OMERO.py")
        ):
            # Skipped artifact families. (The original chain also tested
            # OMERO.server here -- dead code, the first branch wins.)
            pass
        elif filename.startswith("OMERO.importer"):
            # Importer builds that are not win/mac zips count as linux.
            if not win_pat.match(filename) and not mac_pat.match(filename):
                self.linux.append(link)
        else:
            if win_pat.match(filename):
                self.win.append(link)
            if osx_pat.match(filename):
                self.mac.append(link)
            if mac_pat.match(filename):
                self.mac.append(link)
            if linux_pat.match(filename):
                self.linux.append(link)
def _getDescriptors(smiles):
    """Fetch numeric descriptor values for *smiles* from the descriptor service.

    For each descriptor category in defaultChemicalSpace, requests
    <descriptorBaseUrl>.<category>/<smiles> and collects the float values of
    the <Descriptor> elements whose names are listed for that category.

    :param smiles: SMILES string identifying the molecule
    :return: list of float descriptor values, in category/document order
    """
    vals = []
    # Iterate key/value pairs directly instead of keys-then-lookup.
    for key, dnames in defaultChemicalSpace.items():
        url = descriptorBaseUrl + "." + key + "/" + smiles
        # read() replaces the wasteful "".join(readlines()) round-trip.
        xml = urllib.urlopen(url).read()
        root = XML(xml)
        for value in root.findall("Descriptor"):
            if value.get("name") in dnames:
                vals.append(float(value.get("value")))
    return vals
def get_pmid_from_doi(doiStr):
    """Look up the PubMed ID for a DOI via the E-utils [aid] field search.

    Returns a list with at most one PMID string; an ambiguous search
    (more than one hit) or any network/parse failure yields [].
    """
    # Strip URL-path fragments so only the bare DOI is searched.
    doiSearchStr = re.sub("/doi/", "", doiStr)
    doiSearchStr = re.sub("/full", "", doiSearchStr)
    searchLink = "http://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?db=pubmed&term=%s[aid]" % (doiSearchStr)
    try:
        handle = urlopen(searchLink)
        data = handle.read()
        xml = XML(data)  # convert to an xml object so we can apply x-path search fxns to it
        pmidList = [x.text for x in xml.findall(".//Id")]  # find xml "Id" elements
        if len(pmidList) > 1:
            # A DOI should map to exactly one article; treat multiple hits
            # as "not found" rather than guessing.
            pmidList = []
    except Exception:
        # Deliberate best-effort swallow: failures mean "no PMID found".
        pmidList = []
    # Fix: the original built pmidList but fell off the end returning None.
    return pmidList
def _get_group_ids(self):
    """
    helper to fetch the set of geonetwork groups.
    """
    # Ask the xml.info endpoint for the group listing and map each
    # lowercased group name to its id.
    query = urllib.urlencode({'type': 'groups'})
    request = urllib2.Request(self.base + "srv/en/xml.info?" + query)
    doc = XML(self.urlopen(request).read())
    return dict(
        (gp.find('name').text.lower(), gp.attrib['id'])
        for gp in doc.findall('groups/group')
    )
def _get_operation_ids(self):
    """
    helper to fetch the set of geonetwork 'operations' (privileges)
    """
    # Query the xml.info endpoint for operations and index their ids by
    # lowercased operation name.
    query = urllib.urlencode({'type': 'operations'})
    request = urllib2.Request(self.base + "srv/en/xml.info?" + query)
    doc = XML(self.urlopen(request).read())
    return dict(
        (op.find('name').text.lower(), op.attrib['id'])
        for op in doc.findall('operations/operation')
    )
def getProcess(processName):
    """Return the first WMI process INSTANCE whose CommandLine contains *processName*.

    Runs `wmic.exe process list` with raw-XML output and scans each
    INSTANCE's CommandLine PROPERTY. Returns None when nothing matches.
    Windows-only (depends on wmic.exe).
    """
    pipe = os.popen("wmic.exe process list /format:rawxml.xsl")
    try:
        # Single read() instead of accumulating readlines() with +=.
        xmlcontent = pipe.read()
    finally:
        pipe.close()
    root = XML(xmlcontent)
    for instance in root.findall('RESULTS/CIM/INSTANCE'):
        # 'prop' instead of 'property' -- the original shadowed the builtin.
        for prop in instance.findall('PROPERTY'):
            if prop.attrib['NAME'] != 'CommandLine':
                continue
            value = prop.find('VALUE')
            # Guard value.text as well: a present-but-empty VALUE element
            # has text None and crashed the original .find() call.
            if value is not None and value.text is not None \
                    and value.text.find(processName) != -1:
                return instance