def _process_result(self):
    """Parse the update-check XML in ``self._xml_data`` and invoke the
    completion callback as ``self._completion_cb(bundle, version, link,
    size, None)``.

    NOTE(review): Python 2 code (uses ``long``); ``XML``, ``_FIND_*``,
    ``NormalizedVersion`` and ``config`` come from elsewhere in the file.
    """
    document = XML(self._xml_data)
    if document.find(_FIND_DESCRIPTION) is None:
        # No description element: the server has no entry for this
        # bundle/version pair, so report "no update" via None fields.
        logging.debug(
            'Bundle %s not available in the server for the '
            'version %s', self._bundle.get_bundle_id(),
            config.version)
        version = None
        link = None
        size = None
    else:
        try:
            version = NormalizedVersion(document.find(_FIND_VERSION).text)
        except InvalidVersionError:
            logging.exception('Exception occured while parsing version')
            version = '0'
        link = document.find(_FIND_LINK).text
        try:
            # Server reports the size in KiB; convert to bytes.
            size = long(document.find(_FIND_SIZE).text) * 1024
        except ValueError:
            logging.exception('Exception occured while parsing size')
            size = 0
    # Drop the module-level fetcher reference before notifying the caller.
    global _fetcher
    _fetcher = None
    self._completion_cb(self._bundle, version, link, size, None)
def _process_result(self):
    """Parse the update-check XML in ``self._xml_data`` and hand the
    outcome to ``self._completion_cb``.

    NOTE(review): near-duplicate of another ``_process_result`` in this
    file; Python 2 code (uses ``long``).
    """
    document = XML(self._xml_data)
    if document.find(_FIND_DESCRIPTION) is None:
        # Missing description element means the server has no entry for
        # this bundle/version combination.
        logging.debug('Bundle %s not available in the server for the '
                      'version %s', self._bundle.get_bundle_id(),
                      config.version)
        version = None
        link = None
        size = None
    else:
        try:
            version = NormalizedVersion(document.find(_FIND_VERSION).text)
        except InvalidVersionError:
            logging.exception('Exception occured while parsing version')
            version = '0'
        link = document.find(_FIND_LINK).text
        try:
            # Advertised size is in KiB; convert to bytes.
            size = long(document.find(_FIND_SIZE).text) * 1024
        except ValueError:
            logging.exception('Exception occured while parsing size')
            size = 0
    # Clear the module-level fetcher before running the callback.
    global _fetcher
    _fetcher = None
    self._completion_cb(self._bundle, version, link, size, None)
def isatom(body):
    """Return True when *body* parses as an ATOM feed document.

    A document counts as ATOM when its root is the feed element and it
    carries author, id and title children.
    """
    root = XML(body)
    if root.tag != XNAME_FEED:
        return False
    required = (XNAME_AUTHOR, XNAME_ID, XNAME_TITLE)
    return all(root.find(name) is not None for name in required)
def printXml(text):
    # Parse a quote element and print name, price, offer and trend
    # direction as one tab-separated line (Python 2 print statement).
    xml = XML(text)
    # NOTE(review): eval() on element text executes arbitrary code when
    # the XML is untrusted -- float() would be the safe replacement here;
    # confirm where this input comes from before keeping eval.
    print "%s\t%.2f\t%.2f\t%s" % (
        xml.get("name"),
        eval(xml.find("price").text),
        eval(xml.find("offer").text),
        "up" if xml.find("up").text == "True" else "down",
    )
def validate(self, input_xpath):
    """Return True when *input_xpath* is a well-formed XPath expression.

    The expression is probed against a throwaway element; any parsing
    failure is reported as invalid rather than raised.
    """
    probe = XML('<xml></xml>')
    try:
        probe.find(input_xpath)
    except Exception:
        return False
    else:
        return True
def get_status(url=None):
    """Fetch the Kannel status XML and flatten it into nested dicts.

    Returns ``{'sms': {...}, 'dlr': {...}, 'smscs': [...]}``.
    Python 2 code (urllib2); *url* defaults to the STATUS_URL
    environment variable, read at call time.
    """
    url = url or os.environ['STATUS_URL']
    response = urllib2.urlopen(url)
    try:
        xml = XML(response.read())
    finally:
        # Always release the connection, even when parsing fails.
        response.close()
    result = {}
    # <sms> counters.
    el = xml.find('sms')
    result_d = result['sms'] = {}
    result_d['received'] = {
        'total': int(el.findtext('received/total')),
        'queued': int(el.findtext('received/queued'))
    }
    result_d['sent'] = {
        'total': int(el.findtext('sent/total')),
        'queued': int(el.findtext('sent/queued'))
    }
    result_d['storesize'] = int(el.findtext('storesize'))
    # <dlr> counters.
    el = xml.find('dlr')
    result_d = result['dlr'] = {}
    result_d['received'] = {'total': int(el.findtext('received/total'))}
    result_d['sent'] = {'total': int(el.findtext('sent/total'))}
    result_d['queued'] = int(el.findtext('queued'))
    # Per-SMSC breakdown; the `or` fallbacks tolerate both element
    # layouts seen in the wild (sms/received vs received/sms).
    els = xml.find('smscs').findall('smsc')
    result_d = result['smscs'] = []
    for el in els:
        result_d.append({
            'id': el.findtext('id'),
            'admin_id': el.findtext('admin-id'),
            'received': {
                'sms': int(
                    el.findtext('sms/received') or
                    el.findtext('received/sms')),
                'dlr': int(
                    el.findtext('dlr/received') or
                    el.findtext('received/dlr'))
            },
            'sent': {
                'sms': int(el.findtext('sms/sent') or el.findtext('sent/sms')),
                'dlr': int(el.findtext('dlr/sent') or el.findtext('sent/dlr'))
            },
            'failed': int(el.findtext('failed')),
            'queued': int(el.findtext('queued')),
            # Status text looks like "online 123s"; keep the first word.
            'status': el.findtext('status').split(' ', 2)[0]
        })
    return result
class ASLOParser():
    """XML parser to pull out data expressed in our aslo format.

    After :meth:`parse`, ``self.version`` holds the advertised version
    text and ``self.link`` the update URL; when either element is
    missing they fall back to ``0`` / ``None``.
    """

    # Namespace used by the Mozilla update RDF documents.
    _EM_NS = '{http://www.mozilla.org/2004/em-rdf#}'

    def __init__(self, xml_data):
        self.elem = XML(xml_data)

    def parse(self):
        """Extract version and updateLink; default to (0, None) on failure."""
        try:
            # find() returns None for an absent element, so the .text
            # access raises AttributeError -- catch exactly that instead
            # of the previous bare ``except``, which also hid unrelated
            # bugs (typos, KeyboardInterrupt, ...).
            self.version = self.elem.find(
                ".//" + self._EM_NS + "version").text
            self.link = self.elem.find(
                ".//" + self._EM_NS + "updateLink").text
        except AttributeError:
            self.version = 0
            self.link = None
def _get_capabilities(self):
    """Query the hypervisor capabilities XML and summarise the host CPU.

    Returns a dict with the canonical x86_64 machine type, the host CPU
    topology (values coerced to int), the CPU model name, the guest
    features marked toggleable, and the combined host feature list.
    """
    xml = XML(self.conn.getCapabilities())
    capabilities = {
        'canonical': xml.find('guest/arch[@name="x86_64"]/machine[@canonical]').attrib['canonical'],
        'topology': {k: int(v) for k, v in xml.find('host/cpu/topology').attrib.items()},
        'model': xml.find('host/cpu/model').text,
        'toggleable': [e.tag for e in xml.findall('guest/arch[@name="x86_64"]/../features/*[@toggle]')],
    }
    # Host features come from a separately held XML document plus the
    # per-model feature list.  NOTE(review): presumably self._features
    # mirrors the getCapabilities() layout -- confirm against its setter.
    features = [element.attrib['name'] for element in self._features.findall('host/cpu/feature')]
    features.extend(self._list_features(capabilities['model']))
    capabilities['features'] = features
    return capabilities
def isBuildInQueue(self):
    """Return True when the CI server reports this job waiting in its queue."""
    xml_text = util.getContentByLink(address % (self.link))
    queue_flag = XML(xml_text).find('inQueue')
    return queue_flag.text == 'true'
def updatequotewithparameters(self, quote_id, ExternalReference, Grossvalue, netvalue, postingDate, RefDate, SoldToParty, SoldToPartyAdd, Status, TaxAmt, ValidFrm, ValidTo):
    """Log in to the Salesforce partner SOAP API and forward a SAP quote.

    Obtains a session id via the partner WSDL, then hands every quote
    parameter to ``quote_id_to_sf``.  Returns "OK" on success, or the
    raw SOAP response body when the login request fails.
    """
    logging.info("SAP is sending quote with more parameters")
    logging.info(locals())
    logging.info("CONNECTING TO SALESFORCE PARTNER WSDL FOR SESSION ID")
    url = "https://login.salesforce.com/services/Soap/u/28.0"
    # SOAP login envelope; username/password are substituted by the
    # Django template below.
    data = """<?xml version="1.0" encoding="UTF-8"?> <soapenv:Envelope xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/" xmlns:urn="urn:partner.soap.sforce.com"> <soapenv:Header> <urn:CallOptions> <urn:client></urn:client> <urn:defaultNamespace></urn:defaultNamespace> </urn:CallOptions> <urn:LoginScopeHeader> <urn:organizationId></urn:organizationId> <urn:portalId></urn:portalId> </urn:LoginScopeHeader> </soapenv:Header> <soapenv:Body> <urn:login> <urn:username>{{username}}</urn:username> <urn:password>{{password}}</urn:password> </urn:login> </soapenv:Body> </soapenv:Envelope>"""
    t = Template(data)
    c = Context({
        # "username": "******",
        # "password": "******"
        # "username": "******",
        # "password": "******"
        "username": "******",
        "password": "******"
    })
    data = t.render(c)
    logging.info("SENDING:")
    logging.info(data)
    headers = {
        'Content-Type': 'text/xml; charset=utf-8',
        'SOAPAction': 'https://login.salesforce.com/services/Soap/u/28.0'
    }
    # httplib2.debuglevel = 1
    head = httplib2.Http()
    # head.follow_all_redirects = True
    response, content = head.request(url, "POST", smart_str(data), headers)
    logging.info("########### SESSION ID response ###############%s" % response)
    logging.info("########## SESSION ID content ############## \n %s" % pretty(content))
    if response.get('status') == '200':
        logging.info("GOT THE SESSION ID FROM SALESFORCE")
        xml = XML(content)
        # First child of the SOAP Body is the loginResponse element.
        # NOTE(review): the [0][4] positional index into the login result
        # is brittle against WSDL changes -- confirm the field order.
        session_response = xml.find("{http://schemas.xmlsoap.org/soap/envelope/}Body").getchildren()[0]
        session_id = session_response[0][4].text
        quote_id_to_sf(session_id, quote_id, ExternalReference, Grossvalue, netvalue, postingDate, RefDate, SoldToParty, SoldToPartyAdd, Status, TaxAmt, ValidFrm, ValidTo)
    else:
        return content
    return "OK"
def test_manifest_xsf_03(self):
    "Test M2M field."
    # Build a crudity backend whose body maps the M2M field 'language',
    # render the InfoPath manifest, and check the xmlToEdit node.
    body_map = {'user_id': 1, 'language': ''}
    backend = self._get_backend(
        ContactFakeBackend, subject='create_contact',
        body_map=body_map, model=Contact,
    )
    builder = self._get_builder(backend)
    content = builder._render_manifest_xsf(self.request)
    # The my: namespace is declared on the root element; grab it via regex.
    self.assertEqual(
        re.search(r'xmlns:my="(?P<ns>[\w\d\-:/\.]*)"', content)['ns'],
        builder.get_namespace()
    )
    # Can't be got with ElementTree, because it's a namespace
    xsf = '{http://schemas.microsoft.com/office/infopath/2003/solutionDefinition}'
    xml2edit_node = XML(content).find(f'{xsf}views/{xsf}view/{xsf}editing/{xsf}xmlToEdit')
    self.assertIsNotNone(xml2edit_node)
    self.assertEqual('language', xml2edit_node.get('name'))
    self.assertEqual(
        '/my:CremeCRMCrudity/my:language/my:language_value',
        xml2edit_node.get('item')
    )
    # M2M fields are edited through an xTextList component.
    self.assertEqual('xTextList', xml2edit_node.find(f'{xsf}editWith').get('component'))
def _dispatchSoapRequest(self, request):
    """Decode a UPnP SOAP action request, execute it, build the response.

    Python 2 code (``except UPNPError, e`` syntax).  Unexpected failures
    are converted into ``UPNPError(500, ...)`` so the caller can render
    a SOAP fault.
    """
    try:
        try:
            envelope = XML(request.soap_data)
            body = envelope.find("{http://schemas.xmlsoap.org/soap/envelope/}Body")
            # determine UPnP action
            action = body.find("{%s}%s" % (request.soap_ns, request.soap_action))
            # look up the action in the service
            upnp_action = self.service._actions[request.soap_action]
            # build a list of the action arguments
            in_args = {}
            for arg in action:
                in_args[arg.tag] = arg.text
            # execute the UPnP action
            logger.log_debug("executing %s#%s" % (self.service.serviceID, request.soap_action))
            out_args = upnp_action(request, self.service, in_args)
            # return the action response
            env = Element("s:Envelope")
            env.attrib['xmlns:s'] = "http://schemas.xmlsoap.org/soap/envelope/"
            env.attrib['s:encodingStyle'] = "http://schemas.xmlsoap.org/soap/encoding/"
            env.attrib['xmlns:i'] = "http://www.w3.org/1999/XMLSchema-instance"
            body = SubElement(env, "s:Body")
            resp = SubElement(body, "u:%sResponse" % request.soap_action)
            resp.attrib['xmlns:u'] = request.soap_ns
            # out_args is a sequence of (name, type, value) triples.
            for (name, type, value) in out_args:
                arg = SubElement(resp, name)
                arg.attrib["i:type"] = type
                arg.text = value
            output = xmlprint(env)
            return HttpResponse(200, headers={'EXT': ''}, stream=output)
        except UPNPError, e:
            # Action-level UPnP errors propagate unchanged.
            # NOTE(review): since UPNPError presumably subclasses
            # Exception, the re-raise is caught by the outer handler
            # below and re-wrapped as a 500 -- confirm intent.
            raise e
    except Exception, e:
        logger.log_error("caught unhandled exception: %s" % e)
        raise UPNPError(500, "Internal server error")
def get_cell_group(self):
    """Return the first cell-group name stored in this project's NCX file."""
    path = self.get_ncx_file()
    with open(path, 'r') as handle:
        document = XML(handle.read())  # parsed NCX/NeuroML document
    node = document.find(
        "object/void[@property='allSimConfigs']/void/object"
        "/void[@property='cellGroups']/void/string")
    return node.text
def identify(self, geosGeometry, geometryFieldName, layers, url, username, password):
    """Run a WFS intersection query and return one attribute dict per feature.

    Assuming :
        Url like http://localhost:8080/geoserver/wfs
        layers like geonode:capa
        geosGeometry is in Lambert72
    """
    # TODO input checking
    gml = geosGeometry.ogr.gml
    payload = self._buildWfsIntersectRequest(gml, layers, geometryFieldName)
    # verify=False avoids certificate-not-trusted problems
    reply = requests.post(url, data=payload, auth=(username, password), verify=False)
    tree = XML(reply.text)
    if tree.tag == "{http://www.opengis.net/ogc}ServiceExceptionReport":
        # We got an OGC error: surface its root cause as an Exception.
        detail = tree.find('{http://www.opengis.net/ogc}ServiceException')
        raise Exception(str(detail.text).strip())
    results = []
    for feature in tree.findall('{http://www.opengis.net/gml}featureMember'):
        attributes = {}
        for child in feature:
            for leaf in child:
                key = leaf.tag.split('}')[-1]  # strip the namespace prefix
                attributes[key] = leaf.text if leaf.text is not None else ""
        results.append(attributes)
    return results
def get_journal(pmid):
    """Fetch the full journal name for a PubMed id via the esummary API.

    Retries the HTTP request up to MAXURLTRIES times; returns None when
    all attempts fail or the response cannot be parsed.  Python 2 code
    (print statement).
    """
    MAXURLTRIES = 5
    numTries = 0
    success = False
    link = esummary % (pmid)
    req = Request(link)
    while numTries < MAXURLTRIES and success == False:
        try:
            handle = urlopen(req)
            success = True
        except (URLError, HTTPError, BadStatusLine, ParseError):
            print ' failed %d times' % numTries
            numTries += 1
            if numTries == MAXURLTRIES:
                # Out of retries: report "no journal" instead of raising.
                journal_title = None
                return journal_title
    try:
        data = handle.read()
        xml = XML(data)
        journalXML = xml.find('.//FullJournalName')
        if journalXML is not None:
            journal_title = journalXML.text
        else:
            journal_title = None
        return journal_title
    except Exception:
        # Defensive: malformed XML or read errors degrade to None.
        journal_title = None
        return journal_title
def test_manifest_xsf_02(self):
    "Test Image fk field."
    # The backend maps the 'image' FK field; render the manifest and
    # verify both the xmlToEdit node and the menu-area buttons.
    body_map = {'user_id': 1, 'image': ''}
    backend = self._get_backend(
        ContactFakeBackend, subject='create_contact',
        body_map=body_map, model=Contact,
    )
    builder = self._get_builder(backend)
    content = builder._render_manifest_xsf(self.request)
    # The my: namespace is declared on the root element.
    self.assertEqual(re.search(r'xmlns:my="(?P<ns>[\w\d\-:/\.]*)"', content)['ns'],
                     builder.get_namespace()
                     )
    # Can't be got with ElementTree, because it's a namespace
    xsf = '{http://schemas.microsoft.com/office/infopath/2003/solutionDefinition}'
    xml = XML(content)
    xmlToEdit_node = xml.find(f'{xsf}views/{xsf}view/{xsf}editing/{xsf}xmlToEdit')
    self.assertIsNotNone(xmlToEdit_node)
    self.assertEqual('image', xmlToEdit_node.get('name'))
    self.assertEqual('/my:CremeCRMCrudity/my:image', xmlToEdit_node.get('item'))
    # Every menu button must reference the image field.
    button_nodes = xml.findall(f'{xsf}views/{xsf}view/{xsf}menuArea/{xsf}button')
    self.assertTrue(button_nodes)
    self.assertSetEqual(
        {'image'},
        {button_node.get('xmlToEdit') for button_node in button_nodes}
    )
def xml_to_entry(xml, confkey, entry_type, fields, short_year):
    """
    transform a DBLP xml entry of type "entry_type" into a dictionnary
    ready to be output as bibtex

    Returns (key, entry) on success or (None, None) when the XML does
    not parse or its root child is not *entry_type*.  Python 2 code
    (``unicode``).
    """
    try:
        tree = XML(xml)
    except ElementTree.ParseError as e:
        logging.exception("XML Parsing Error")
        return None, None
    elt = tree.find(entry_type.lower())
    if elt is None:
        logging.warning('Entry type is not "{0}"'.format(entry_type))
        return None, None
    entry = {}
    authors = []  # list of pairs (full author name, last name for BibTeX key)
    pages_error = None
    for e in elt:
        if e.tag == "author":
            authors.append(
                get_author_name_and_for_key(clean_author(unicode(e.text))))
        elif e.tag in fields:
            val = xml_get_value(e)  # e.text
            if e.tag == "pages":
                # Normalise "a-b" ranges to BibTeX "a--b"; non-matching
                # values are recorded and reported after the loop.
                r = re_pages.match(val)
                if r is None:
                    pages_error = val
                else:
                    a = r.group(1)
                    b = r.group(2)
                    c = r.group(3)
                    if a == "" or (b is not None and c == ""):
                        pages_error = val
                    if b is None:
                        val = a
                    else:
                        val = a + "--" + c
            elif e.tag == "title":
                # Drop DBLP's trailing period from titles.
                if val[-1] == ".":
                    val = val[:-1]
            entry[e.tag] = html_to_bib_value(val, title=(e.tag == "title"))
        if e.tag == "ee" and "doi" in fields:
            # Derive a DOI from the electronic-edition URL when asked to.
            doi_ee_re = re.compile(
                r"^https?://(?:(?:dx.)?doi.org|doi.acm.org)/(.*)$")
            p = doi_ee_re.match(e.text)
            if p:
                if "doi" not in entry:
                    entry["doi"] = html_to_bib_value(p.group(1))
    authors_bibtex = [a[0] for a in authors]
    entry["author"] = html_to_bib_value(
        (u" and \n" + " " * 18).join(authors_bibtex))
    authors_last_names = [a[1] for a in authors]
    key = authors_to_key(authors_last_names, confkey, short_year)
    if pages_error is not None:
        logging.error("Entry \"{}\": error in pages (\"{}\")".format(
            key, pages_error))
    return key, entry
def loadFromXml(self, xmlContent):
    """Load ISO-4217 currency entries from *xmlContent* into this table.

    Rows without a <Ccy> code are skipped.
    """
    table = XML(xmlContent).find("CcyTbl")
    for row in table.findall("CcyNtry"):
        code = row.find("Ccy")
        if code is None:
            continue
        self.addEntry(code.text,
                      row.find("CcyNm").text,
                      row.find("CtryNm").text)
def get_status(url=None):
    """Fetch the Kannel status XML and flatten it into nested dicts.

    :param url: status-page URL; when omitted, read from the STATUS_URL
        environment variable *at call time*.
    :return: ``{'sms': {...}, 'dlr': {...}, 'smscs': [...]}``

    Bug fixed: the original signature was
    ``def get_status(url=os.environ['STATUS_URL'])`` -- the default is
    evaluated once at import, so importing the module crashed with a
    KeyError when STATUS_URL was unset and later environment changes
    were ignored.  Deferring the lookup matches the corrected sibling
    implementation elsewhere in this file.
    """
    url = url or os.environ['STATUS_URL']
    response = urllib2.urlopen(url)
    try:
        xml = XML(response.read())
    finally:
        # Always release the connection, even when parsing fails.
        response.close()
    result = {}
    # <sms> counters.
    el = xml.find('sms')
    result_d = result['sms'] = {}
    result_d['received'] = {'total': int(el.findtext('received/total')),
                            'queued': int(el.findtext('received/queued'))}
    result_d['sent'] = {'total': int(el.findtext('sent/total')),
                        'queued': int(el.findtext('sent/queued'))}
    result_d['storesize'] = int(el.findtext('storesize'))
    # <dlr> counters.
    el = xml.find('dlr')
    result_d = result['dlr'] = {}
    result_d['received'] = {'total': int(el.findtext('received/total'))}
    result_d['sent'] = {'total': int(el.findtext('sent/total'))}
    result_d['queued'] = int(el.findtext('queued'))
    # Per-SMSC breakdown.
    els = xml.find('smscs').findall('smsc')
    result_d = result['smscs'] = []
    for el in els:
        result_d.append({
            'id': el.findtext('id'),
            'admin_id': el.findtext('admin-id'),
            'received': {
                'sms': int(el.findtext('sms/received')),
                'dlr': int(el.findtext('dlr/received'))
            },
            'sent': {
                'sms': int(el.findtext('sms/sent')),
                'dlr': int(el.findtext('dlr/sent'))
            },
            'failed': int(el.findtext('failed')),
            'queued': int(el.findtext('queued')),
            # Status text looks like "online 123s"; keep the first word.
            'status': el.findtext('status').split(' ', 2)[0]
        })
    return result
def __downloader_complete_cb(self, downloader, result):
    """Handle completion of the ASLO update-check download.

    Emits 'check-complete' with an Exception, None (no update or parse
    failure) or a BundleUpdate describing the newer version.  Python 2
    code (uses ``long``).
    """
    if isinstance(result, Exception):
        self.emit('check-complete', result)
        return
    if result is None:
        _logger.error('No XML update data returned from ASLO')
        return
    document = XML(result.get_data())
    if document.find(_FIND_DESCRIPTION) is None:
        # No description element: server has no entry for this bundle.
        _logger.debug('Bundle %s not available in the server for the '
                      'version %s', self._bundle.get_bundle_id(),
                      config.version)
        version = None
        link = None
        size = None
        self.emit('check-complete', None)
        return
    try:
        version = NormalizedVersion(document.find(_FIND_VERSION).text)
    except InvalidVersionError:
        _logger.exception('Exception occurred while parsing version')
        self.emit('check-complete', None)
        return
    link = document.find(_FIND_LINK).text
    try:
        # Advertised size is in KiB; convert to bytes.
        size = long(document.find(_FIND_SIZE).text) * 1024
    except ValueError:
        _logger.exception('Exception occurred while parsing size')
        size = 0
    # Only report an update when the server version is strictly newer.
    if version > NormalizedVersion(self._bundle.get_activity_version()):
        result = BundleUpdate(self._bundle.get_bundle_id(),
                              self._bundle.get_name(), version, link, size)
    else:
        result = None
    self.emit('check-complete', result)
def __downloader_complete_cb(self, downloader, result):
    """Handle completion of the ASLO update-check download.

    NOTE(review): near-duplicate of another ``__downloader_complete_cb``
    in this file.  Emits 'check-complete' with an Exception, None, or a
    BundleUpdate.  Python 2 code (uses ``long``).
    """
    if isinstance(result, Exception):
        self.emit('check-complete', result)
        return
    if result is None:
        _logger.error('No XML update data returned from ASLO')
        return
    document = XML(result.get_data())
    if document.find(_FIND_DESCRIPTION) is None:
        # Missing description element: no server entry for this bundle.
        _logger.debug(
            'Bundle %s not available in the server for the '
            'version %s', self._bundle.get_bundle_id(),
            config.version)
        version = None
        link = None
        size = None
        self.emit('check-complete', None)
        return
    try:
        version = NormalizedVersion(document.find(_FIND_VERSION).text)
    except InvalidVersionError:
        _logger.exception('Exception occurred while parsing version')
        self.emit('check-complete', None)
        return
    link = document.find(_FIND_LINK).text
    try:
        # Advertised size is in KiB; convert to bytes.
        size = long(document.find(_FIND_SIZE).text) * 1024
    except ValueError:
        _logger.exception('Exception occurred while parsing size')
        size = 0
    # Only report an update when the server version is strictly newer.
    if version > NormalizedVersion(self._bundle.get_activity_version()):
        result = BundleUpdate(self._bundle.get_bundle_id(),
                              self._bundle.get_name(), version, link, size)
    else:
        result = None
    self.emit('check-complete', result)
def test_case_actions_are_removed_when_true(self):
    """The case element survives, but its create/update/close children go."""
    raw_xml = ''' <data> <question>answer</question> <case> <create></create> <update></update> <close></close> </case> </data> '''.strip()
    cleaned = _clean_xml_for_partial_submission(
        raw_xml, should_remove_case_actions=True)
    case_node = XML(cleaned).find('case')
    self.assertIsNotNone(case_node)
    for action in ('create', 'update', 'close'):
        self.assertIsNone(case_node.find(action))
def open_rest(self):
    """Read the reST document at ``self.path`` and extract its title.

    Stores the raw text in ``self.rest`` and the document title (or
    'Untitled' when absent) in ``self.title``.
    """
    self.rest = _read(codecs.open(self.path, 'r', 'utf-8'))
    # Render to docutils XML quietly (report_level=5 silences warnings).
    xhtml = publish_string(self.rest, writer_name='xml',
                           settings_overrides=dict(report_level=5))
    x = XML(xhtml)  # parse the XML text
    t = x.find('title')
    try:
        self.title = t.text  # extract its title
    except AttributeError:
        # find() returned None: the document has no <title> element.
        self.title = 'Untitled'
def isBuildRunning(self):
    """Return True when the most recent build is still in progress."""
    build_xml = XML(util.getContentByLink(address % (self.getLastBuildLink())))
    in_progress = build_xml.find('building').text == 'true'
    if in_progress:
        logging.info(' there is a building running')
    return in_progress
def _list_alchemy_results(xml,relevance): dom = XML(xml) results = [] if dom.find('status').text == 'OK': for concept in dom.findall('.//concept'): if float(concept.find('relevance').text) > relevance: results.append(concept.find('text').text) for kw in dom.findall('.//keyword'): if float(kw.find('relevance').text) > relevance: results.append(kw.find('text').text) return results
def _list_alchemy_results(xml, relevance): dom = XML(xml) results = [] if dom.find("status").text == "OK": for concept in dom.findall(".//concept"): if float(concept.find("relevance").text) > relevance: results.append(concept.find("text").text) for kw in dom.findall(".//keyword"): if float(kw.find("relevance").text) > relevance: results.append(kw.find("text").text) return results
def pmc_id_to_pmid(pmc_id_list):
    """Translate PubMed-Central ids to PubMed ids via the NCBI converter.

    Makes one HTTP request per id (Python 2 urllib2 style) and returns
    the pmids as ints, in input order.
    """
    pmid_list = []
    for pmcid in pmc_id_list:
        link = pmc_pmid_url % (pmcid)
        req = Request(link)
        handle = urlopen(req)
        data = handle.read()
        xml = XML(data)
        # The converter replies with <record pmid="...">.
        pmid = int(xml.find('record').attrib['pmid'])
        pmid_list.append(pmid)
    return pmid_list
def _get_build_link_from_response(self, response): """ Extract the link to the new build from the XML returned - unfortunately bamboo api doesn't give back json data for this method :param response: response text of the post request to kick off the new build :return: href of the link to the new build - used to get the status """ response_xml = XML(response) self.log.debug("Parsing build response for build link:" + response) build_link = response_xml.find("link").attrib['href'] self.log.debug("Found build link: " + build_link) return build_link
def getLastBuildLink(self):
    """Compute, cache (in ``self.latestLink``) and return the newest build link."""
    job_xml = XML(util.getContentByLink(address % (self.link)))
    build_number = job_xml.find('build/number').text
    latest = '%s/%s' % (self.link, build_number)
    self.latestLink = latest
    logging.info('the latest build address is %s' % (latest))
    return latest
def convert_rest_to_wp(restFile):
    'convert ReST to WP html using docutils, rst2wp'
    from docutils.core import publish_string
    import rst2wp
    from xml.etree.ElementTree import XML
    # Python 2 code: uses the file() builtin.
    ifile = file(restFile)  # read our restructured text
    rest = ifile.read()
    ifile.close()
    # First pass: docutils XML, only to recover the document title.
    xhtml = publish_string(rest, writer_name='xml')
    x = XML(xhtml)  # parse the XML text
    title = x.find('title').text  # extract its title
    # Second pass: render the body with the WordPress writer.
    writer = rst2wp.Writer()
    html = publish_string(rest, writer=writer)  # convert to wordpress
    return title, html
def set_metadata_privs(self, uuid, privileges): """ set the full set of geonetwork privileges on the item with the specified uuid based on the dictionary given of the form: { 'group_name1': {'operation1': True, 'operation2': True, ...}, 'group_name2': ... } all unspecified operations and operations for unspecified groups are set to False. """ # XXX This is a fairly ugly workaround that makes # requests similar to those made by the GeoNetwork # admin based on the recommendation here: # http://bit.ly/ccVEU7 get_dbid_url = self.base + 'srv/en/portal.search.present?' + urllib.urlencode({'uuid': uuid}) # get the id of the data. request = urllib2.Request(get_dbid_url) response = self.urlopen(request) doc = XML(response.read()) data_dbid = doc.find('metadata/{http://www.fao.org/geonetwork}info/id').text # update group and operation info if needed if len(self._group_ids) == 0: self._group_ids = self._get_group_ids() if len(self._operation_ids) == 0: self._operation_ids = self._get_operation_ids() # build params that represent the privilege configuration priv_params = { "id": data_dbid, # "uuid": layer.uuid, # you can say this instead in newer versions of GN } for group, privs in privileges.items(): group_id = self._group_ids[group.lower()] for op, state in privs.items(): if state != True: continue op_id = self._operation_ids[op.lower()] priv_params['_%s_%s' % (group_id, op_id)] = 'on' # update all privileges update_privs_url = self.base + "srv/en/metadata.admin?" + urllib.urlencode(priv_params) request = urllib2.Request(update_privs_url) response = self.urlopen(request)
def getLastBuildStatus(self):
    """Block until no build is queued or running, then report success.

    Polls the queue every 180s and a running build every 60s; returns
    True iff the finished build's <result> element is SUCCESS.
    """
    while(self.isBuildInQueue()):
        import time
        logging.info('a queue is pending, wait after 180 seconds')
        time.sleep(180)
    while(self.isBuildRunning()):
        import time
        logging.info('build is ongoing, retry after 60 seconds')
        time.sleep(60)
    # NOTE(review): relies on self.latestLink having been set by a prior
    # getLastBuildLink() call -- confirm the expected call order.
    content = util.getContentByLink(address % (self.latestLink))
    root = XML(content)
    return 'SUCCESS' == root.find('result').text
def setUp(self):
    """Wire the order-submit signal and point the NGC API at on-disk fixtures."""
    self.path = reverse('ngc-order-submit')
    order_submit.connect(self.order_submit_receiver)
    # semi-hacky. get the order-submit requiest to hit the file on disk
    # see use of 'order_submit_url' in views.py
    ngc_settings.API_BASE_URL = 'file://{0}'.format(self.data_dir)
    ngc_settings.MERCHANT_ID = self.checkout_redirect_fn
    # Read the canned checkout-redirect XML so the test can compare the
    # serial number and redirect URL the view is expected to produce.
    checkout_redirect_xml_path = \
        OrderSubmitView.order_submit_frmt_str.format(
            NGC_API_BASE_URL=self.data_dir,
            NGC_MERCHANT_ID=self.checkout_redirect_fn)
    cr_xml = XML(open(join(checkout_redirect_xml_path)).read())
    self.serial_number = cr_xml.get('serial-number')
    self.redirect_url = cr_xml.find(xpq_redirect_url).text
def ise_m_activecount(ip, username, password, port=443):
    """Return the active-session count from Cisco ISE's MnT REST API.

    Uses HTTP basic auth over TLS.  Certificate validity is deliberately
    NOT checked (verify_mode = CERT_NONE); a KDE CA bundle is still
    loaded for completeness.
    """
    context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
    context.verify_mode = ssl.CERT_NONE
    context.load_verify_locations("/usr/share/kde4/apps/kssl/ca-bundle.crt")
    connection = HTTPSConnection(ip, port=port, context=context)
    credentials = b64encode((username + ":" + password).encode()).decode("ascii")
    connection.request("GET", "/admin/API/mnt/Session/ActiveCount",
                       headers={"Authorization": "Basic %s" % credentials})
    payload = connection.getresponse().read()
    return XML(payload.decode()).find("count").text
def wepay_callback(request):
    """WeChat-Pay (微信支付) payment-notification callback view.

    Verifies the notification signature with WePayDoPay and answers
    WeChat with the XML acknowledgement it expects: FAIL on an empty
    body or a bad signature, SUCCESS/OK otherwise.  The request body is
    WeChat's payment-result XML (CDATA fields such as out_trade_no,
    result_code, sign, ...).
    """
    body = request.body
    if not body:
        # Empty body: reject so WeChat retries the notification.
        return HttpResponse((''' <xml> <return_code><![CDATA[FAIL]]></return_code> <return_msg><![CDATA[FAIL]]></return_msg> </xml> '''))
    xml = XML(body)
    # Re-compute the signature over the notification; reject on mismatch.
    wp = WePayDoPay(out_trade_no='', total_fee=0, body='')
    success = wp.verify_notice_sign(xml)
    if not success:
        return HttpResponse(''' <xml> <return_code><![CDATA[FAIL]]></return_code> <return_msg><![CDATA[Sign error]]></return_msg> </xml> ''')
    out_trade_no = xml.find('out_trade_no').text
    ''' 支付成功逻辑 '''
    # (payment-success handling goes here)
    # print('支付成功!')
    return HttpResponse((''' <xml> <return_code><![CDATA[SUCCESS]]></return_code> <return_msg><![CDATA[OK]]></return_msg> </xml> '''))
def quote_id_to_sf(session_id, quote_id):
    """Push a quote id to Salesforce through the QuoteClass1 SOAP service.

    Renders the SOAP envelope with the given session/quote ids, POSTs it
    via httplib2, and parses (without using) the response body.  Returns
    None in all cases.
    """
    logging.info(
        "############## CONNECTING TO SALESFORCE QUOTE WSDL ##############")
    url = "https://ap1.salesforce.com/services/Soap/class/QuoteClass1"
    # SOAP envelope; ids substituted by the Django template below.
    data = """<?xml version="1.0" encoding="UTF-8"?> <soapenv:Envelope xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/" xmlns:quot="http://soap.sforce.com/schemas/class/QuoteClass1"> <soapenv:Header> <quot:SessionHeader> <quot:sessionId>{{session_id}}</quot:sessionId> </quot:SessionHeader> </soapenv:Header> <soapenv:Body> <quot:insertQuote> <quot:quoteId>{{quote_id}}</quot:quoteId> </quot:insertQuote> </soapenv:Body> </soapenv:Envelope>"""
    t = Template(data)
    c = Context({"session_id": session_id, "quote_id": quote_id})
    data = t.render(c)
    logging.info("SENDING:")
    logging.info(data)
    headers = {
        'Content-Type': 'text/xml; charset=utf-8',
        'SOAPAction': 'https://ap1.salesforce.com/services/Soap/class/QuoteClass1'
    }
    # httplib2.debuglevel = 1
    head = httplib2.Http()
    # head.follow_all_redirects = True
    response, content = head.request(url, "POST", smart_str(data), headers)
    logging.info("######################### QUOTE response ############## %s"
                 % response)
    logging.info(
        "###################### QUOTE content ################# \n%s"
        % pretty(content))
    if response.get('status') == "200":
        xml = XML(content)
        # NOTE(review): the parsed insertQuote response is discarded.
        quote_response = xml.find(
            "{http://schemas.xmlsoap.org/soap/envelope/}Body").getchildren()[0]
        return
def svn_log(pkgbase, repo):
    """Fetch the newest SVN log entry for *pkgbase* in *repo*.

    Builds the trunk URL from settings.SVN_BASE_URL plus the repo's
    svn_root and shells out to ``svn log --limit=1 --xml``.
    Returns a dict with revision, date, author and message.
    """
    trunk_url = '%s%s/%s/trunk/' % (settings.SVN_BASE_URL, repo.svn_root, pkgbase)
    output = subprocess.check_output(
        ['svn', 'log', '--limit=1', '--xml', trunk_url])
    # With --limit=1 the XML contains exactly one <logentry>.
    entry_xml = XML(output)
    when = datetime.strptime(entry_xml.findtext('logentry/date'),
                             '%Y-%m-%dT%H:%M:%S.%fZ')
    return {
        'revision': int(entry_xml.find('logentry').get('revision')),
        'date': when,
        'author': entry_xml.findtext('logentry/author'),
        'message': entry_xml.findtext('logentry/msg'),
    }
def ise_m_activecount(ip, username, password, port=443):
    """Return the active-session count from Cisco ISE's MnT REST API.

    NOTE(review): near-duplicate of another ``ise_m_activecount`` in
    this file; certificate validity is NOT checked.
    """
    # This sets up the https connection
    context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)  # accepted SSL/TLS protocol versions
    # CERT_NONE, CERT_OPTIONAL or CERT_REQUIRED (certificate validity is not checked)
    context.verify_mode = ssl.CERT_NONE
    context.load_verify_locations(
        '/usr/share/kde4/apps/kssl/ca-bundle.crt')  # CA root-certificate bundle
    c = HTTPSConnection(ip, port=port, context=context)
    # HTTP basic-auth header from "user:password".
    user_pass_str = username + ':' + password
    user_pass_str_encode = user_pass_str.encode()
    userAndPass = b64encode(user_pass_str_encode).decode("ascii")
    headers = {'Authorization': 'Basic %s' % userAndPass}
    c.request('GET', '/admin/API/mnt/Session/ActiveCount', headers=headers)
    res = c.getresponse()
    data = res.read()
    root = XML(data.decode())
    activecount = root.find('count').text
    return activecount
def load_vmpp(file_location): print 'Loading VMPPs.' counter = 0 current_pct = 0.0 start_time = time.time() with open(file_location) as vmpp_file: all_vmpp_xml = XML(vmpp_file.read()) all_vmpps = all_vmpp_xml.find('VMPPS').findall('VMPP') total_count = len(all_vmpps) for vmpp_xml in all_vmpps: vppid = vmpp_xml.find('VPPID') # invalid = vmpp_xml.find('INVALID') nm = vmpp_xml.find('NM') # abbrevnm = vmpp_xml.find('ABBREVNM') vpid = vmpp_xml.find('VPID') # qtyval = vmpp_xml.find('QTYVAL') # qty_uomcd = vmpp_xml.find('QTY_UOMCD') # combpackcd = vmpp_xml.find('COMBPACKCD') vmpp = VirtualMedicinalProductPack(vppid=int(vppid.text), nm=nm.text, vpid_id=int(vpid.text), qtyval=qtyval.text, qty_uomcd=qty_uomcd.text ) if invalid is not None: vmpp.invalid = invalid.text if abbrevnm is not None: vmpp.abbrevnm = abbrevnm.text if combpackcd is not None: vmpp.combpackcd = combpackcd.text vmpp.save() counter += 1 if ((float(counter) / float(total_count)) * 100.0) >= current_pct: print ' %d%%' % (current_pct), current_pct += 10 print '... processed %d VMPPs in %ds.' % (counter, (time.time() - start_time))
def test_timeEnd_value_is_set(self):
    """meta/timeEnd is stamped with the (mocked) current UTC time."""
    xml_with_case_action = ''' <data> <question>answer</question> <meta> <timeEnd/> </meta> </data> '''.strip()
    now = utcnow()
    expected_time_end = json_format_datetime(now)
    # Freeze utcnow inside the cleaner so the stamped value is predictable.
    with patch('corehq.apps.smsforms.app.utcnow', return_value=now):
        cleaned_xml = _clean_xml_for_partial_submission(
            xml_with_case_action, should_remove_case_actions=True)
    xml = XML(cleaned_xml)
    self.assertEqual(
        xml.find('meta').find('timeEnd').text,
        expected_time_end)
def getLibId(self, library):
    """Look up the Plex section id for the library named *library*.

    Fetches /library/sections with the cached X-Plex-Token and returns
    the matching Directory's ``key`` attribute.  Any failure is logged
    via ``self.logger.exception`` and the method falls through,
    implicitly returning None.
    """
    try:
        # Get or Set X-Plex-Token
        self.getToken()
        self.logger.info("Getting id for Plex Library: " + library)
        # Set URL
        url = self.host + "/library/sections?X-Plex-Token=" + self.token
        # Make Request
        r = requests.get(url)
        id = None  # NOTE(review): shadows the builtin ``id``
        # Check for Successful Request. If Status Code is 200 and text is empty = Success
        if r.status_code != 200:
            self.logger.error("Error " + str(r.status_code) +
                              " Getting Plex Library From URL: " + url)
            self.logger.error(r.text)
            # Throw Exception if there is a Request Error
            raise Exception(
                'Error Getting Library. Could Not Connect to: ' + url)
        else:
            elem = XML(r.text)
            tvNode = elem.find(
                './/Directory[@title="{library}"]'.format(library=library))
            if tvNode is None:
                raise Exception("Error Could Not find Library: " + library)
            else:
                id = tvNode.attrib['key']
                # Log Success
                self.logger.info("Successfully Got Libary: id: " + str(id))
                return id
    except Exception as err:
        # Log Exception -- errors are swallowed here; the caller
        # receives None in that case.
        self.logger.exception(err)
def quote_id_to_sf(session_id, quote_id):
    """Send a quote id to Salesforce via the QuoteClass1 SOAP endpoint.

    Renders the SOAP envelope with the given session/quote ids, POSTs it
    via httplib2, and parses (without using) the response body.
    NOTE(review): near-duplicate of another ``quote_id_to_sf`` in this
    file.
    """
    logging.info("############## CONNECTING TO SALESFORCE QUOTE WSDL ##############")
    url = "https://ap1.salesforce.com/services/Soap/class/QuoteClass1"
    # SOAP envelope; ids substituted by the Django template below.
    data = """<?xml version="1.0" encoding="UTF-8"?> <soapenv:Envelope xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/" xmlns:quot="http://soap.sforce.com/schemas/class/QuoteClass1"> <soapenv:Header> <quot:SessionHeader> <quot:sessionId>{{session_id}}</quot:sessionId> </quot:SessionHeader> </soapenv:Header> <soapenv:Body> <quot:insertQuote> <quot:quoteId>{{quote_id}}</quot:quoteId> </quot:insertQuote> </soapenv:Body> </soapenv:Envelope>"""
    t = Template(data)
    c = Context({
        "session_id": session_id,
        "quote_id": quote_id
    })
    data = t.render(c)
    logging.info("SENDING:")
    logging.info(data)
    headers = {
        'Content-Type': 'text/xml; charset=utf-8',
        'SOAPAction': 'https://ap1.salesforce.com/services/Soap/class/QuoteClass1'
    }
    # httplib2.debuglevel = 1
    head = httplib2.Http()
    # head.follow_all_redirects = True
    response, content = head.request(url, "POST", smart_str(data), headers)
    logging.info("######################### QUOTE response ############## %s" % response)
    logging.info("###################### QUOTE content ################# \n%s" % pretty(content))
    if response.get('status') == "200":
        xml = XML(content)
        # NOTE(review): the parsed insertQuote response is discarded.
        quote_response = xml.find("{http://schemas.xmlsoap.org/soap/envelope/}Body").getchildren()[0]
        return
def settings(self):
    """Fetch the server's global settings.

    Requests settings.xml from the catalog and flattens the 'settings',
    'jai' and 'coverageAccess' sections into a dict mapping each section
    name to a list of (key, value) pairs; nested entries become
    'parent/child' keys.

    Raises Exception when the HTTP request does not return 200.
    """
    settings = {}
    settings_url = url(self.catalog.service_url, ['settings.xml'])
    headers, response = self.http.request(settings_url, 'GET')
    if headers.status != 200:
        raise Exception('Settings listing failed - %s, %s' % (headers, response))
    dom = XML(response)
    for section in ('settings', 'jai', 'coverageAccess'):
        params = []
        node = dom.find(section)
        # node is None when the catalog does not support this operation;
        # the section then maps to an empty list.
        if node is not None:
            for entry in node:
                if len(entry) == 0:
                    # Leaf entry: plain (tag, text) pair.
                    params.append((entry.tag, entry.text))
                else:
                    # Nested entry: qualify children with the parent tag.
                    params.extend((entry.tag + '/' + child.tag, child.text)
                                  for child in entry)
        settings[section] = params
    return settings
def update(self):
    """Refresh the Monit daemon and per-service status.

    Fetches the XML status page, stores the parsed platform info under
    'platform' and each service under its name. If any service has a
    pending action or is still starting up (monitorState == 2), waits a
    second and re-runs the update from scratch.
    """
    response = self.s.get(self.baseurl + '/_status?format=xml')
    response.raise_for_status()
    from xml.etree.ElementTree import XML
    root = XML(response.text)
    # Platform information lives in a single <platform> element.
    self['platform'] = Monit.Platform(root.find('platform'))
    # One <service> element per monitored service.
    for element in root.iter('service'):
        service = Monit.Service(self, element)
        self[service.name] = service
        # Pendingaction occurs when a service is stopping; retry shortly.
        if self[service.name].pendingaction:
            time.sleep(1)
            return Monit.update(self)
        # monitorState == 2 means the service is still starting up.
        if self[service.name].monitorState == 2:
            time.sleep(1)
            return Monit.update(self)
def add_single_article_full(pmid): MAXURLTRIES = 5 numTries = 0 success = False link = efetch % (pmid) req = Request(link) while numTries < MAXURLTRIES and success == False: try: handle = urlopen(req) success = True except (URLError, HTTPError, BadStatusLine, ParseError): print ' failed %d times' % numTries numTries += 1 if numTries == MAXURLTRIES: a = None return a try: data = handle.read() xml = XML(data) titleXML = xml.find('.//ArticleTitle') if titleXML is not None: title = titleXML.text else: title = ' ' # generate a new article in db a = m.Article.objects.get_or_create(title=title, pmid = pmid)[0] except Exception: return None a.save() return a # add journalTitle to article journalTitle = xml.find('.//Title') if journalTitle is not None: j = m.Journal.objects.get_or_create(title = journalTitle.text)[0] a.journal = j if j.short_title is None: shortJournalTitleXML = xml.find('.//ISOAbbreviation') if shortJournalTitleXML is not None: j.short_title = shortJournalTitleXML.text j.save() # add authors to article authorList = xml.findall(".//Author[@ValidYN='Y']") if len(authorList) == 0: authorList = xml.findall(".//Author") authorListText = [] for author in authorList: try: last = author.find("./LastName").text fore = author.find("./ForeName").text initials = author.find("./Initials").text #print last, fore, initials authorOb = m.Author.objects.get_or_create(first=fore, last=last, initials=initials)[0] a.authors.add(authorOb) currAuthorStr = '%s %s' % (last, initials) authorListText.append(currAuthorStr) except AttributeError: continue author_list_str = '; '.join(authorListText) author_list_str = author_list_str[0:min(len(author_list_str), 500)] a.author_list_str = author_list_str #get publication year pub_year = xml.find(".//PubDate/Year") if pub_year is not None: a.pub_year = int(pub_year.text) # find mesh terms and add them to db for x in xml.findall('.//DescriptorName'): if x.text is not None: mesh = m.MeshTerm.objects.get_or_create(term = x.text)[0] 
a.terms.add(mesh) # find substances and add them to db for x in xml.findall('.//NameOfSubstance'): if x.text is not None: s = m.Substance.objects.get_or_create(term = x.text)[0] a.substances.add(s) abstractXML = xml.findall('.//AbstractText') abstract = ' ' if len(abstractXML) > 0: abstractList = [x.text for x in abstractXML] abstractList = filter(None, abstractList) abstract = ' '.join(abstractList) a.abstract = abstract url = get_article_full_text_url(pmid) a.full_text_link = url a.save() return a
def get_console_device(self):
    """Return the host-side PTY device path of the domain's console.

    Parses the libvirt domain XML and reads the 'tty' attribute of the
    pty-backed <console> element under <devices>.
    """
    desc = XML(self.domain.XMLDesc(0))
    console = desc.find('devices/console[@type="pty"]')
    return console.attrib['tty']
def get_nightly_binary_path(self, nightly_date):
    """Download (if needed) and extract the nightly build for `nightly_date`,
    returning the path to its executable.

    Returns None when nightly_date is None; exits the process on an empty
    date, on repository errors, or when no build exists for the date.
    """
    if nightly_date is None:
        return
    if not nightly_date:
        print(
            "No nightly date has been provided although the --nightly or -n flag has been passed.")
        sys.exit(1)
    # Will alow us to fetch the relevant builds from the nightly repository
    os_prefix = "linux"
    if is_windows():
        os_prefix = "windows-msvc"
    if is_macosx():
        os_prefix = "mac"
    nightly_date = nightly_date.strip()
    # Fetch the filename to download from the build list
    repository_index = NIGHTLY_REPOSITORY_URL + "?list-type=2&prefix=nightly"
    req = urllib2.Request(
        "{}/{}/{}".format(repository_index, os_prefix, nightly_date))
    try:
        response = urllib2.urlopen(req).read()
        tree = XML(response)
        # The listing is XML-namespaced; extract the namespace URI from the
        # root tag ('{uri}tag') so find() can use it.
        namespaces = {'ns': tree.tag[1:tree.tag.index('}')]}
        file_to_download = tree.find('ns:Contents', namespaces).find(
            'ns:Key', namespaces).text
    except urllib2.URLError as e:
        print("Could not fetch the available nightly versions from the repository : {}".format(
            e.reason))
        sys.exit(1)
    except AttributeError as e:
        # find() returned None: no build entry for this date/platform.
        print("Could not fetch a nightly version for date {} and platform {}".format(
            nightly_date, os_prefix))
        sys.exit(1)
    nightly_target_directory = path.join(self.context.topdir, "target")
    # ':' is not an authorized character for a file name on Windows
    # make sure the OS specific separator is used
    target_file_path = file_to_download.replace(':', '-').split('/')
    destination_file = os.path.join(
        nightly_target_directory, os.path.join(*target_file_path))
    # Once extracted, the nightly folder name is the tar name without the extension
    # (eg /foo/bar/baz.tar.gz extracts to /foo/bar/baz)
    destination_folder = os.path.splitext(destination_file)[0]
    nightlies_folder = path.join(
        nightly_target_directory, 'nightly', os_prefix)
    # Make sure the target directory exists
    if not os.path.isdir(nightlies_folder):
        print("The nightly folder for the target does not exist yet. "
              "Creating {}".format(
                  nightlies_folder))
        os.makedirs(nightlies_folder)
    # Download the nightly version
    if os.path.isfile(path.join(nightlies_folder, destination_file)):
        print("The nightly file {} has already been downloaded.".format(
            destination_file))
    else:
        print("The nightly {} does not exist yet, downloading it.".format(
            destination_file))
        download_file(destination_file, NIGHTLY_REPOSITORY_URL +
                      file_to_download, destination_file)
    # Extract the downloaded nightly version
    if os.path.isdir(destination_folder):
        print("The nightly folder {} has already been extracted.".format(
            destination_folder))
    else:
        self.extract_nightly(nightlies_folder, destination_folder, destination_file)
    return self.get_executable(destination_folder)
def loadFromXml(self, xmlContent):
    """Populate the table from an ISO 4217 currency XML document.

    Walks every CcyNtry under CcyTbl and calls self.addEntry with the
    currency code, currency name and country name. Entries without a
    Ccy element (territories with no universal currency) are skipped.
    """
    root = XML(xmlContent)
    for node in root.find("CcyTbl").findall("CcyNtry"):
        code = node.find("Ccy")
        if code is None:
            continue
        self.addEntry(code.text,
                      node.find("CcyNm").text,
                      node.find("CtryNm").text)
def get_vnc_port(self):
    """Return the domain's VNC display port as an int.

    Parses the libvirt domain XML and reads the 'port' attribute of the
    vnc-typed <graphics> element under <devices>.
    """
    desc = XML(self.domain.XMLDesc(0))
    graphics = desc.find('devices/graphics[@type="vnc"]')
    return int(graphics.attrib['port'])
def parse(self):
    """Parse a MySQL Workbench (.mwb) model file into Table objects.

    Reads document.mwb.xml out of the zip archive at self.path, builds
    Table/Column/Index objects, wires up foreign keys, and collapses
    pure join tables (no remaining columns, exactly two FKs) into
    ManyToManyConnection pairs.

    Side effect: dumps the raw model XML to 'data.xml' for debugging.

    Returns the list of Table objects (join tables removed).
    """
    # .mwb files are zip archives containing the model XML.
    archive = ZipFile(self.path, "r")
    data_xml = archive.read('document.mwb.xml')
    archive.close()  # FIX: the archive was previously never closed
    # Keep a debug copy of the raw model XML.
    f2 = open('data.xml', 'w')
    f2.write(data_xml)
    f2.close()
    tree = XML(data_xml)
    catalog = tree.find(".//value[@key='catalog']")
    tables = catalog.find(".//value[@key='tables']")
    result = []
    cols = {}
    tables_ref = {}
    ## tables
    for table in tables.getchildren():
        id_table = table.get("id")
        name = table.find("./value[@key='name']")
        t = Table(id_table, name.text)
        tables_ref[id_table] = t
        ## columns
        columns = table.find("./value[@key='columns']")
        for column in columns.getchildren():
            name = column.find("./value[@key='name']").text
            length = int(column.find("./value[@key='length']").text)
            simple_type = column.find("./link[@key='simpleType']")
            if simple_type is None:
                # Fall back to user-defined types.
                simple_type = column.find("./link[@key='userType']")
            simple_type = simple_type.text
            auto_increment = bool(int(column.find("./value[@key='autoIncrement']").text))
            is_not_null = bool(int(column.find("./value[@key='isNotNull']").text))
            scale = int(column.find("./value[@key='scale']").text)
            precision = int(column.find("./value[@key='precision']").text)
            c = Column(column.get("id"), name, simple_type, length,
                       auto_increment, is_not_null, scale, precision)
            t.columns.append(c)
            c.table = t
            cols[c.id] = c
        result.append(t)
        ## indexes
        indexes = table.find("./value[@key='indices']")
        for index in indexes:
            i = Index(index.find("./value[@key='name']").text)
            t.indexes.append(i)
            columns = index.find("./value[@key='columns']")
            is_primary = bool(int(index.find("./value[@key='isPrimary']").text))
            is_unique = bool(int(index.find("./value[@key='unique']").text))
            for column in columns:
                col = cols[column.find("./link[@key='referencedColumn']").text]
                col.is_primary = is_primary
                col.unique = is_unique
                i.columns.append(col)
    ## relations (second pass so every referenced column already exists)
    for table in tables.getchildren():
        foreign_keys = table.find("./value[@key='foreignKeys']")
        for foreign_key in foreign_keys.getchildren():
            owner_column = cols[foreign_key.find("./value[@key='columns']").getchildren()[0].text]
            referenced_column = cols[foreign_key.find("./value[@key='referencedColumns']").getchildren()[0].text]
            is_one_to_one = not bool(int(foreign_key.find("./value[@key='many']").text))
            owner_column.define_as_foreignkey(referenced_column, is_one_to_one)
    # check many to many
    # BUG FIX: iterate over a snapshot — calling result.remove(table) while
    # iterating `result` itself skips the element following each removal.
    for table in list(result):
        if len(table.columns) == 0 and len(table.foreign_keys) == 2:
            fk1 = table.foreign_keys[0]
            fk2 = table.foreign_keys[1]
            fk1.referenced_column.table.foreign_key_targets.remove(fk1)
            fk2.referenced_column.table.foreign_key_targets.remove(fk2)
            fk1_many_to_many = ManyToManyConnection(table, fk1.referenced_column.table,
                                                    fk2.referenced_column.table, fk1,
                                                    fk1.referenced_column, True)
            fk2_many_to_many = ManyToManyConnection(table, fk2.referenced_column.table,
                                                    fk1.referenced_column.table, fk2,
                                                    fk2.referenced_column, False)
            fk1_many_to_many.target_many_to_many = fk2_many_to_many
            fk2_many_to_many.target_many_to_many = fk1_many_to_many
            fk1.referenced_column.table.many_to_many_connections.append(fk1_many_to_many)
            fk2.referenced_column.table.many_to_many_connections.append(fk2_many_to_many)
            result.remove(table)
    return result
def whois(request, site):
    """View: return the WHOIS registrant name of `site` as plain text.

    Queries the trynt WHOIS API and extracts Whois/regrinfo/owner/name
    from the XML response.
    """
    result = urlfetch.fetch('http://www.trynt.com/whois-api/v1/?f=0&h=' + site)
    owner = XML(result.content).find('Whois/regrinfo/owner/name')
    return HttpResponse(owner.text)
def add_single_article_full(pmid, overwrite_existing=True):
    """Retrieves article metadata from pubmed given article pmid

    if overwrite_existing is False, attempts to find article in
    neuroelectro db before searching pubmed

    Returns neuroelectro.Article object or None if article not found or created
    """
    if not overwrite_existing:
        # Fast path: reuse a previously imported article when allowed.
        try:
            a = m.Article.objects.get(pmid=pmid)
            return a
        except ObjectDoesNotExist:
            pass
    MAXURLTRIES = 5
    numTries = 0
    success = False
    link = efetch % (pmid)
    req = Request(link)
    # Retry transient network/parse failures up to MAXURLTRIES times.
    while numTries < MAXURLTRIES and success == False:
        try:
            handle = urlopen(req)
            success = True
        except (URLError, HTTPError, BadStatusLine, ParseError):
            print " failed %d times" % numTries
            numTries += 1
            if numTries == MAXURLTRIES:
                a = None
                return a
    try:
        data = handle.read()
        xml = XML(data)
        titleXML = xml.find(".//ArticleTitle")
        if titleXML is not None:
            title = titleXML.text
        else:
            title = " "
        # generate a new article in db
        a = m.Article.objects.get_or_create(title=title, pmid=pmid)[0]
    except Exception:
        # Any parse/db failure during creation aborts the import.
        return None
    a.save()
    # return a <- @Shreejoy should this return be here?
    # add journalTitle to article
    journalTitle = xml.find(".//Title")
    if journalTitle is not None:
        j = m.Journal.objects.get_or_create(title=journalTitle.text)[0]
        a.journal = j
        if j.short_title is None:
            shortJournalTitleXML = xml.find(".//ISOAbbreviation")
            if shortJournalTitleXML is not None:
                j.short_title = shortJournalTitleXML.text
                j.save()
    # add authors to article; prefer validated author records when present
    authorList = xml.findall(".//Author[@ValidYN='Y']")
    if len(authorList) == 0:
        authorList = xml.findall(".//Author")
    authorListText = []
    for author in authorList:
        try:
            last = author.find("./LastName").text
            fore = author.find("./ForeName").text
            initials = author.find("./Initials").text
            authorOb = m.Author.objects.get_or_create(first=fore, last=last, initials=initials)[0]
            a.authors.add(authorOb)
            currAuthorStr = "%s %s" % (last, initials)
            authorListText.append(currAuthorStr)
        except AttributeError:
            # Author record missing a name component — skip it.
            continue
    author_list_str = "; ".join(authorListText)
    # Truncate to fit the 500-character column.
    author_list_str = author_list_str[0 : min(len(author_list_str), 500)]
    a.author_list_str = author_list_str
    # get publication year
    pub_year = xml.find(".//PubDate/Year")
    if pub_year is not None:
        a.pub_year = int(pub_year.text)
    # find mesh terms and add them to db
    for x in xml.findall(".//DescriptorName"):
        if x.text is not None:
            mesh = m.MeshTerm.objects.get_or_create(term=x.text)[0]
            a.terms.add(mesh)
    # find substances and add them to db
    for x in xml.findall(".//NameOfSubstance"):
        if x.text is not None:
            s = m.Substance.objects.get_or_create(term=x.text)[0]
            a.substances.add(s)
    abstractXML = xml.findall(".//AbstractText")
    abstract = " "
    if len(abstractXML) > 0:
        # Drop None entries before joining the (possibly structured) abstract.
        abstractList = [x.text for x in abstractXML]
        abstractList = filter(None, abstractList)
        abstract = " ".join(abstractList)
    a.abstract = abstract
    url = get_article_full_text_url(pmid)
    a.full_text_link = url
    a.save()
    return a