def test_disable_entities_false_attempts_external_dtd(self):
    xml = """
    <!DOCTYPE external [
        <!ENTITY ee SYSTEM "http://www.python.org/">
    ]>
    <root>&ee;</root>
    """

    def raising_external_ref_handler(*args, **kwargs):
        parser = ParserCreate(*args, **kwargs)
        parser.ExternalEntityRefHandler = lambda *x: 0
        try:
            feature = "http://apache.org/xml/features/disallow-doctype-decl"
            parser._reader.setFeature(feature, True)
        except AttributeError:
            pass
        return parser

    expat.ParserCreate = raising_external_ref_handler
    # Using this try/except because a TypeError is thrown before
    # the ExpatError, and Python 2.6 is confused by that.
    try:
        parse(xml, disable_entities=False, expat=expat)
    except expat.ExpatError:
        self.assertTrue(True)
    else:
        self.assertTrue(False)
    expat.ParserCreate = ParserCreate
def search_lxml(show, content):
    """Search TVRage online API for show data."""
    try:
        tree = etree.fromstring(content)
    except Exception:
        log.critical('Problem parsing XML with lxml')
        return None

    matches = defaultdict(list)
    # parse show names in the same order as returned by tvrage; the first one
    # is usually the good one
    for xml_show in XPATH_SHOW(tree):
        for name in extract_names(xml_show):
            ratio = int(difflib.SequenceMatcher(
                None, show['clean_name'], clean_name(name)).ratio() * 100)
            if ratio == 100:
                return xmltodict.parse(etree.tostring(xml_show))['show']
            matches[ratio].append(xml_show)

    # if no 100% match is found, check the highest-ratio matches
    for ratio, xml_matches in sorted(matches.items(), reverse=True):
        for xml_match in xml_matches:
            if ratio >= 80:
                return xmltodict.parse(etree.tostring(xml_match))['show']
            elif 80 > ratio > 60:
                if 'country' in show and show['country'] and XPATH_COUNTRY(xml_match):
                    if str.lower(show['country']) == str.lower(XPATH_COUNTRY(xml_match)[0]):
                        return xmltodict.parse(etree.tostring(xml_match))['show']
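# A minimal, self-contained sketch of the fuzzy-ratio matching used above
# (the helper name here is hypothetical, not from the snippet's codebase):
import difflib

def name_ratio(a, b):
    # SequenceMatcher.ratio() returns 2*M/T for M matching chars over T total chars
    return int(difflib.SequenceMatcher(None, a, b).ratio() * 100)

print(name_ratio("the office", "the office"))     # 100
print(name_ratio("the office", "the office us"))  # 86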
def test_nested(self):
    obj = {'a': {'b': '1', 'c': '2'}}
    self.assertEqual(obj, parse(unparse(obj)))
    self.assertEqual(unparse(obj), unparse(parse(unparse(obj))))
    obj = {'a': {'b': {'c': {'@a': 'x', '#text': 'y'}}}}
    self.assertEqual(obj, parse(unparse(obj)))
    self.assertEqual(unparse(obj), unparse(parse(unparse(obj))))
def nfo_xml_file_tv(media_file_path):
    """Find and load nfo and xml file(s) if they exist."""
    nfo_data = None
    xml_data = None
    # check for NFO or XML, as there is no need to do a lookup if an ID is found in it
    # TODO should check one dir back too, I suppose
    nfo_file_check = media_file_path.rsplit('/', 1)[0] + '/tvinfo.nfo'
    if os.path.isfile(nfo_file_check):  # check for nfo
        common_global.es_inst.com_elastic_index('info', {'nfo tv file found': nfo_file_check})
        try:
            nfo_data = xmltodict.parse(common_file.com_file_load_data(nfo_file_check, False))
        except xml.parsers.expat.ExpatError:
            pass
        except UnicodeDecodeError:
            pass
    else:
        nfo_file_check = media_file_path.rsplit('/', 1)[0] + '/tvshow.nfo'
        if os.path.isfile(nfo_file_check):  # check for nfo
            common_global.es_inst.com_elastic_index('info', {'nfo tv file found2': nfo_file_check})
            try:
                nfo_data = xmltodict.parse(common_file.com_file_load_data(nfo_file_check, False))
            except xml.parsers.expat.ExpatError:
                pass
            except UnicodeDecodeError:
                pass
    return nfo_data, xml_data
def getelev(x):
    """Uses the USGS elevation service to retrieve elevation.

    Args:
        x (array of floats): longitude and latitude of the point where
            elevation is desired

    Returns:
        float elevation of the location in meters
    """
    elev = "http://ned.usgs.gov/epqs/pqs.php?x=" + str(x[0]) + \
           "&y=" + str(x[1]) + "&units=Meters&output=xml"
    try:
        response = urllib2.urlopen(elev)
        html = response.read()
        d = xmltodict.parse(html)
        g = float(d['USGS_Elevation_Point_Query_Service']['Elevation_Query']['Elevation'])
    except BadStatusLine:
        try:
            response = urllib2.urlopen(elev)
            html = response.read()
            d = xmltodict.parse(html)
            g = float(d['USGS_Elevation_Point_Query_Service']['Elevation_Query']['Elevation'])
        except BadStatusLine:
            print "could not fetch {:}".format(elev)
            g = 0
    return g
def convert_results(self, results, output_format, return_format, inherit_from):
    if output_format == 'json':
        if return_format.lower() == 'xml':
            results = dicttoxml(json.loads(results))
        elif return_format.lower() == 'object':
            results = self.json_to_object(json.loads(results), 'QueryObject', inherit_from)
        else:
            results = json.loads(results)
    elif output_format == 'xml':
        if return_format.lower() == 'json':
            results = json.loads(json.dumps(xmltodict.parse(results)))
        elif return_format.lower() == 'object':
            jsonresults = json.loads(json.dumps(xmltodict.parse(results)))
            results = self.json_to_object(jsonresults, 'QueryObject', inherit_from)
    elif output_format == 'javascript':
        if return_format.lower() in ('json', 'xml', 'object'):
            print("Cannot convert 'JavaScript' response to '" +
                  return_format.lower() + "'...returning 'JavaScript'")
    return results
def fetch_results(user, item, min_price, max_price):
    amazon = bottlenose.Amazon("", "", "")
    response = amazon.ItemSearch(
        Title=item,
        MaximumPrice=max_price + "00",
        MinimumPrice=min_price + "00",
        SearchIndex="Electronics",
        ItemPage="5",
    )
    print json.dumps(xmltodict.parse(response), sort_keys=True, indent=4,
                     separators=(",", ": "))
    dictionary = xmltodict.parse(response)
    if "Item" in dictionary["ItemSearchResponse"]["Items"]:
        count = 1
        temp_dictionary = {}
        for key in dictionary["ItemSearchResponse"]["Items"]["Item"]:
            item_id = key["ASIN"]
            title = key["ItemAttributes"]["Title"]
            url = key["DetailPageURL"]
            temp_dictionary[str(count)] = {"Title": title.strip(),
                                           "Url": url.strip(),
                                           "Key": item_id.strip()}
            count = count + 1
        jsond = demjson.encode(temp_dictionary)
        print json.dumps(xmltodict.parse(response), sort_keys=True, indent=4,
                         separators=(",", ": "))
    else:
        count = 0
    # TODO - CONNECT TO MONGO
    return count
def xmlparser(xmlfile):
    xmldoc = open(xmlfile, 'r')
    xmlcont = xmldoc.read()
    xmldoc.close()
    tree = xmltodict.parse(xmlcont)
    #print tree
    #checktree(tree)
    if tree.get('project'):
        if tree['project'].get('properties'):
            properties = tree['project']['properties']
            xmlcont = expand(xmlcont, properties)
            tree = xmltodict.parse(xmlcont)
    if tree.get('project'):
        dependencies = tree['project']['dependencies']
    else:
        dependencies = tree['dependencies']
    mav_coords = []
    for item in dependencies['dependency']:
        mav = str(item['groupId']) + ":" + str(item['artifactId']) + ":" + str(item['version'])
        mav_coords.append(mav)
    return mav_coords
def _epub_parser(epub):
    """Handle EPUB-specific parsing; return dict of ebook metadata.

    An EPUB must contain META-INF/container.xml, which contains the path
    to the EPUB metadata file.
    """
    sha256 = file_hash(epub)
    zf = ZipFile(epub)
    xml = xmltodict.parse(zf.read('META-INF/container.xml'))
    # TODO: validate this is true for all EPUBs
    metadata_path = xml['container']['rootfiles']['rootfile']['@full-path']
    raw_metadata = xmltodict.parse(zf.read(metadata_path))
    metadata = {'format': 'epub'}
    for k, v in raw_metadata['package']['metadata'].items():
        if 'dc:' in k:
            if 'creator' in k:
                # Required element, needs additional parsing
                k = 'author'
                v = v['#text']
            if 'identifier' in k:
                # Required element, needs additional parsing
                k = 'identifiers'
                if not isinstance(v, list):  # Just in case we get a single element
                    v = [v]
                # Support multiple identifiers
                identifiers = []
                for i in v:
                    identifiers.append({'identifier': i['@opf:scheme'],
                                        'value': i['#text']})
                v = identifiers
            metadata[k.split('dc:')[-1]] = v
    metadata['identifiers'].append({'identifier': 'sha256', 'value': sha256})
    return metadata
def test_init_with_safe_encrypt_mode(self):
    """Test initialization in safe (encrypted) mode."""
    conf = WechatConf(
        token=self.token,
        appid=self.appid,
        appsecret=self.appsecret,
        encrypt_mode="safe",
        encoding_aes_key=self.encoding_aes_key,
    )
    self.assertEqual(conf.token, self.token)
    self.assertEqual(conf.appid, self.appid)
    self.assertEqual(conf.appsecret, self.appsecret)
    self.assertEqual(conf.encoding_aes_key, self.encoding_aes_key)
    self.assertIsNotNone(conf.crypto)

    # Test decrypting a request message from the WeChat server
    req = conf.crypto.decrypt_message(
        msg=self.safe_request_message,
        msg_signature=self.msg_signature,
        timestamp=self.timestamp,
        nonce=self.nonce
    )
    self.assertEqual(xmltodict.parse(req), xmltodict.parse(self.normal_request_message))

    # Test encrypting the response message
    origin_crypto = conf.crypto._WechatBaseCrypto__pc
    conf.crypto._WechatBaseCrypto__pc = TestBaseCrypto(key=conf.crypto._WechatBaseCrypto__key)
    resp = conf.crypto.encrypt_message(msg=self.response_message, nonce=self.nonce,
                                       timestamp=self.timestamp)
    self.assertEqual(xmltodict.parse(resp), xmltodict.parse(self.response_encrypted_message))
    conf.crypto._WechatBaseCrypto__pc = origin_crypto
def get_dsn_raw():
    """Returns a current snapshot of the DSN XML feed converted to JSON,
    and updates a copy in redis.

    Gets the DSN XML feed, converts it to JSON, saves the JSON to redis,
    and returns the JSON.
    """
    # pass the url a param 'r' = timestamp to avoid hitting their cloudfront cache
    timestamp = str(int(mktime(datetime.now().timetuple())))
    response = urlopen('https://eyes.nasa.gov/dsn/data/dsn.xml?r=' + timestamp)
    dom = parse(response)
    dsn_data = {}
    for node in dom.childNodes[0].childNodes:
        if not hasattr(node, 'tagName'):  # useless nodes
            continue
        # dsn feed is strange: dishes should appear inside station nodes but don't,
        # so converting the entire xml doc to a dict loses the station/probe relation;
        # we have to walk node by node to grab the station, THEN convert each dish
        # node to a dict
        if node.tagName == 'station':
            station = node.getAttribute('friendlyName')
            dsn_data.setdefault(station, {})
            dsn_data[station]['friendlyName'] = node.getAttribute('friendlyName')
            dsn_data[station]['timeUTC'] = node.getAttribute('timeUTC')
            dsn_data[station]['timeZoneOffset'] = node.getAttribute('timeZoneOffset')
        if node.tagName == 'dish':
            dsn_data[station].setdefault('dishes', []).append(
                xmltodict.parse(node.toxml())['dish'])
    r_server.set('dsn_raw', dumps(dsn_data))
    return dsn_data
def test_encoded_string(self):
    try:
        value = unichr(39321)
    except NameError:
        value = chr(39321)
    xml = "<a>%s</a>" % value
    self.assertEqual(parse(xml), parse(xml.encode("utf-8")))
def get_fundir(session, mps_in_nefndir):
    url = 'http://www.althingi.is/altext/xml/nefndarfundir/?lthing=' + str(session)
    response = requests.get(url)
    data = xmltodict.parse(response.text)
    fundir = []
    for fundur in data[u'nefndarfundir'][u'nefndarfundur']:
        try:
            dagsetning = fundur[u'hefst'][u'dagur']
            nefnd_id = fundur[u'nefnd'][u'@id']
            fundargerd = requests.get(fundur[u'nánar'][u'fundargerð'][u'xml'])
            fundargerd_data = xmltodict.parse(fundargerd.text)
            maeting = fundargerd_data[u'nefndarfundur'][u'fundargerð'][
                u'texti'].split('</h2>')[1].split('<BR><BR>')[0]
            attendance = maeting.split('<BR>')
            mps = []
            for a in attendance:
                if 'fyrir' in a:
                    m = a.split('fyrir ')[1].split(' (')[0]
                    # fix the declension of the name
                    mps.append(process.extractOne(m, mps_in_nefndir)[0])
                else:
                    mps.append(a.split(' (')[0])
            #mps = [i.split(' (')[0] for i in attendance]
            fundir.append({'nefnd': nefnd_id, 'mps': mps, 'dagsetning': dagsetning})
        except Exception as e:
            print(fundur[u'nefnd']['#text'] + ' - ' + fundur[u'hefst'][u'texti'])
    return fundir
def create_vm(self):
    with open(self.settings['template']) as template:
        conf = xmltodict.parse(template.read())
        conf['domain']['name'] = self.name
        conf['domain']['devices']['disk']['source']['@file'] = self.path
        del conf['domain']['uuid']
        self._setup_vm_interfaces(conf)
    with open('node.xml', 'w') as node:
        node.write(xmltodict.unparse(conf, pretty=True))
    shell('qemu-img create -f qcow2 ' + self.path + ' 8G -o preallocation=metadata')
    shell('virsh define node.xml')
    print 'node defined, starting'
    shell('virsh start ' + self.name)
    print 'node started'

    # We now have a new node. Find its MAC address so we can identify it in MAAS.
    conf = xmltodict.parse(grab('virsh dumpxml ' + self.name))
    interfaces = conf['domain']['devices']['interface']
    if isinstance(interfaces, list):
        # Just grab the mac_address of every interface; we'll require the
        # first one to be the one that boots and can find MAAS for now
        for interface in interfaces:
            self.mac_addresses.append(interface['mac']['@address'])
    elif isinstance(interfaces, dict):  # actually OrderedDict
        interface = interfaces
        self.mac_addresses = [interface['mac']['@address']]
    else:
        raise RuntimeError("don't know how to handle interfaces that is a {}".format(
            type(interfaces)))
    self.mac_address = self.mac_addresses[0]
def _query(self, data: List[SeismicData], **kwargs):
    """This is the method that all models override. It handles querying the
    velocity model and filling in the SeismicData structures.

    Args:
        points (:obj:`list` of :obj:`SeismicData`): List of SeismicData objects
            containing the points in depth. These are to be populated with
            :obj:`VelocityProperties`:

    Returns:
        True on success, false if there is an error.
    """
    if "params" not in kwargs:
        display_and_raise_error(21)

    try:
        with open(kwargs["params"], "r") as fd:
            xml_in = xmltodict.parse(fd.read())
            self.data_dir = xml_in["root"]["out_dir"]
    except FileNotFoundError:
        with open(kwargs["params"] + ".xml", "r") as fd:
            xml_in = xmltodict.parse(fd.read())
            self.data_dir = xml_in["root"]["out_dir"]

    self._initialize(xml_in["root"])

    if xml_in["root"]["format"] == "awp":
        self._awp_query(data)
    elif xml_in["root"]["format"] == "rwg":
        self._rwg_query(data)
    elif xml_in["root"]["format"] == "etree":
        self._etree_query(data)
def load_coreNLP_annotations(filename, token_keys=['word', 'lemma', 'POS', 'NER']):
    sentences = []

    def tokens_callback(path, tokens):
        try:
            tokenlist = tokens['tokens']['token']
            if "@id" in tokenlist:  # avoid singleton bug
                tokenlist = [tokenlist]
            s = Sentence(tokenlist, keys=token_keys)
            sentences.append(s)
            return True
        except Exception as e:
            print >> sys.stderr, "Error parsing sentence %d" % len(sentences)
            print >> sys.stderr, repr(e)
            pdb.set_trace()
            return False

    with open(filename) as fd:
        # streaming parse of each root/document/sentences/sentence
        xmltodict.parse(fd, item_depth=4, item_callback=tokens_callback)
    return sentences
def wrapper(*args, **kwargs):
    rtc_obj = args[0].get_rtc_obj()

    if not hasattr(rtc_obj, "headers") or rtc_obj.headers is None:
        # still in the initialization or relogin; directly call the method
        return func(*args, **kwargs)
    else:
        # check whether the token has expired
        try:
            resp = func(*args, **kwargs)
            xmltodict.parse(resp.content)
            return resp
        except ExpatError as excp:
            if "invalid token" in str(excp):
                # expired: relogin and retry
                try:
                    rtc_obj.relogin()
                except RTCException:
                    raise RTCException("Relogin Failed: "
                                       "Invalid username or password")
                kwargs["headers"]["Cookie"] = rtc_obj.headers["Cookie"]
                return func(*args, **kwargs)
            else:
                # not expired: raise the actual exception
                raise ExpatError(excp)
def __init__(self, conn, switch_dict):
    self.conn = conn
    self.switch_dict = switch_dict
    self.op_rpc = 'show ethernet-switching interfaces detail | display xml'
    self.xml_output = self.conn.send_command(self.op_rpc)
    if self.switch_dict['username'] == "root":
        self.clean_xml = str(self.xml_output).partition("\n")[2]
    else:
        self.clean_xml = str(self.xml_output).strip().partition("\n")[2]
    if self.clean_xml:
        try:
            self.dict_of_xml = xmltodict.parse(self.clean_xml)
        except ExpatError:
            try:
                self.dict_of_xml = xmltodict.parse(self.xml_output)
            except Exception:
                raise
        self.up_access_interfaces = 0
        for interface in self.dict_of_xml['rpc-reply'][
                'switching-interface-information']['interface']:
            self.gige_re = re.compile('ge-.*')
            if (self.gige_re.match(interface['interface-name'])
                    and interface['interface-port-mode'] == "Access"
                    and interface['interface-state'] == "up"):
                self.up_access_interfaces += 1
    else:
        print("I connected, but no valid response was received from the "
              "switch. Here's the raw output:<snip>\n{0}".format(self.xml_output))
        print("</snip>")
        raise ValueError
def generate_references(self):
    self.zip_file = zipfile.ZipFile(
        self.book_filename, mode='r', allowZip64=True)
    self.file_list = self.zip_file.namelist()

    # Book structure relies on parsing the .opf file in the book.
    # Now that might be the usual content.opf or package.opf, or it might
    # be named after your favorite eldritch abomination. The point is we
    # have to check the container.xml.
    container = self.find_file('container.xml')
    if container:
        container_xml = self.zip_file.read(container)
        container_dict = xmltodict.parse(container_xml)
        packagefile = container_dict['container']['rootfiles']['rootfile']['@full-path']
    else:
        presumptive_names = ('content.opf', 'package.opf', 'volume.opf')
        for i in presumptive_names:
            packagefile = self.find_file(i)
            if packagefile:
                logger.info('Using presumptive package file: ' + self.book_filename)
                break

    packagefile_data = self.zip_file.read(packagefile)
    self.opf_dict = xmltodict.parse(packagefile_data)
def create_get_workout_xml(login_response_xml):
    login_xml_dict = xmltodict.parse(login_response_xml)
    with open(get_workout_xml) as fd:
        workout_xml_dict = xmltodict.parse(fd.read())
    set_ids_for_workout_xml(login_xml_dict, workout_xml_dict)
    xml_to_send = xmltodict.unparse(workout_xml_dict)
    return xml_to_send
def get_info(self, request):
    path = self.translate_path(request.form['path'])
    parts = path.partition('/representations')
    ip = parts[0]
    hrefs = self._get_href_variations(parts[1] + parts[2])
    namespace = '{http://ead3.archivists.org/schema/}'
    tree = ET.parse('%s/metadata/descriptive/EAD.xml' % ip)

    # regular file - daoset
    for href in hrefs:
        did_list = tree.findall(".//%sdid/*/%sdao[@href='%s']/../.." %
                                (namespace, namespace, href))
        if did_list:
            o = xmltodict.parse(ET.tostring(did_list[0]))
            return json.dumps(o)

    # regular file - no daoset
    for href in hrefs:
        did_list = tree.findall(".//%sdid/%sdao[@href='%s']/.." %
                                (namespace, namespace, href))
        if did_list:
            o = xmltodict.parse(ET.tostring(did_list[0]))
            return json.dumps(o)

    # directory
    for href in hrefs:
        did_list = tree.findall(".//%sc[@base='%s']/%sdid" %
                                (namespace, href, namespace))
        if did_list:
            o = xmltodict.parse(ET.tostring(did_list[0]))
            return json.dumps(o)

    # fallback
    return flask.jsonify(
        error=404,
        error_text='Not Found',
        info='No metadata associated to this element'
    )
def test_gen_model():
    stream = StringIO()
    delegate = mock
    with XMLGenerator(stream, skip_stringify=True) as xg:
        delegate._gen_model(xg, Model(
            'single_port_ram',
            (ModelInputPort("we", clock="clk"),
             ModelInputPort("addr", clock="clk", combinational_sink_ports=["out"]),
             ModelInputPort("data", clock="clk", combinational_sink_ports=["out"]),
             ModelInputPort("clk", is_clock=True)),
            (ModelOutputPort("out", clock="clk"), )))
    back = parse(stream.getvalue(), encoding="ascii", dict_constructor=dict)
    gold = parse("""
    <model name="single_port_ram">
        <input_ports>
            <port name="we" clock="clk"/>
            <port name="addr" clock="clk" combinational_sink_ports="out"/>
            <port name="data" clock="clk" combinational_sink_ports="out"/>
            <port name="clk" is_clock="1"/>
        </input_ports>
        <output_ports>
            <port name="out" clock="clk"/>
        </output_ports>
    </model>
    """, dict_constructor=dict)
    assert back == gold
def add_keywords(self, domain_name, keywords):
    payload = {
        'user_name': self.user_name,
        'api_key': self.api_key,
        'action': 'managedomains',
        'type': 'keyword',
        'setting': keywords[0],
        'domain': domain_name
    }
    response = requests.get('https://api.parkingcrew.com/manage_v3.php', params=payload)
    response_dict = xmltodict.parse(response.text)
    if response_dict['response']['result']['success'] == '0':
        return (False, response_dict['response']['result']['error']['msg'])

    payload = {
        'user_name': self.user_name,
        'api_key': self.api_key,
        'action': 'managedomains',
        'type': 'related',
        'setting': '|'.join(keywords),
        'domain': domain_name
    }
    response = requests.get('https://api.parkingcrew.com/manage_v3.php', params=payload)
    response_dict = xmltodict.parse(response.text)
    if response_dict['response']['result']['success'] == '1':
        return (True, 'Success')
    elif response_dict['response']['result']['success'] == '0':
        return (False, response_dict['response']['result']['error']['msg'])
def retrieve_individual_firewall():
    # Individual fw
    db.drop_collection('fw')
    retrieveConfig('fw', os.getenv("USER"), password)
    xml_file = 'fw.xml'
    with open(xml_file) as fd:
        mydict = xmltodict.parse(fd.read())
    os.remove('%s' % xml_file)
    db.fw.insert(mydict, check_keys=False)  # check_keys=False so Mongo accepts XML-derived key names

    # Individual fw1
    db.drop_collection('fw1')
    retrieveConfig('fw1', os.getenv("USER"), password)
    xml_file = 'fw1.xml'
    with open(xml_file) as fd:
        mydict = xmltodict.parse(fd.read())
    os.remove('%s' % xml_file)
    db.fw1.insert(mydict, check_keys=False)

    # Individual fw2
    db.drop_collection('fw2')
    retrieveConfig('fw2', os.getenv("USER"), password)
    xml_file = 'fw2.xml'
    with open(xml_file) as fd:
        mydict = xmltodict.parse(fd.read())
    os.remove('%s' % xml_file)
    db.fw2.insert(mydict, check_keys=False)
def geoSearch(geo=None):
    photos = flickr.photos.search(lat=41.8830663, lon=-87.63293, radius=20)
    photos = xmltodict.parse(photos)
    for photo in photos['photos']['photo']:
        photo_id = photo['@id']
        photo_id = json.dumps(photo_id)
        #location = venue['location']
        #lon = venue['location']['lat']
        #lng = venue['location']['lng']
        #comments = flickr.photos.comments.getList(lat=41.8830663, lon=-87.63293, radius=20)
        info = flickr.photos.getInfo(photo_id=photo_id)
        info = xmltodict.parse(info)
        description = info['photo']['description']
        location = flickr.photos.geo.getLocation(photo_id=photo_id)
        location = xmltodict.parse(location)
        location = location['photo']['location']
        temp = {}
        temp['text'] = description
        temp['location'] = location
        insert(temp)
def bus_data():
    cta_key = 'w8xeHXDhPGHtGCY7mngPuNcpD'
    routes_api_url = (
        'http://www.ctabustracker.com/bustime/api/v1/getroutes?key=%s')
    vehicle_api_url = (
        'http://www.ctabustracker.com/bustime/api/v1/getvehicles?key=%s&rt=%s')
    local_file = (
        '/Users/abrahamepton/Tribune/toytrains/static/json/bus_data.json')
    s3_key = 'toytrains/static/json/bus_data.json'
    response = urllib2.urlopen(routes_api_url % cta_key)
    routes = xmltodict.parse(response.read())
    curr_idx = 0
    route_requests = [[]]
    for r in routes['bustime-response']['route']:
        if len(route_requests[curr_idx]) == 10:
            curr_idx += 1
            route_requests.append([])
        route_requests[curr_idx].append(r['rt'])
    vehicles = []
    for rts in route_requests:
        response = urllib2.urlopen(vehicle_api_url % (cta_key, ','.join(rts)))
        data = xmltodict.parse(response.read())
        for vehicle in data['bustime-response']['vehicle']:
            vehicles.append(vehicle)
    FH = open(local_file, 'w')
    FH.write(json.dumps(vehicles))
    FH.close()
    _write_string_to_s3(s3_key, json.dumps(vehicles))
    return 'Done'
def _process_file(self, f, attr_prefix='ATTR_'):
    """xmltodict can either return attributes of nodes as prefixed fields
    (prefixed to avoid key collisions), or ignore them altogether.
    Set attr_prefix to whatever you want; setting it to False ignores
    attributes.
    """
    import xmltodict
    if self.postprocessor:
        obj = xmltodict.parse(f, attr_prefix=self.attr_prefix,
                              postprocessor=self.postprocessor)
    else:
        obj = xmltodict.parse(f, attr_prefix=self.attr_prefix)

    # If a node list was given, walk down the tree
    if self.node_list:
        for node in self.node_list:
            obj = obj[node]

    # If the top-level XML object in the file is a list, then yield each
    # element separately; otherwise, yield the top-level object.
    if isinstance(obj, list):
        for record in obj:
            yield record
    else:
        yield obj
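# For reference, a standalone sketch of how attr_prefix and xml_attribs
# change the shape xmltodict.parse() returns (older versions return
# OrderedDicts, but the keys are the same):
import xmltodict

doc = '<node id="1">text</node>'
print(xmltodict.parse(doc))                       # {'node': {'@id': '1', '#text': 'text'}}
print(xmltodict.parse(doc, attr_prefix='ATTR_'))  # {'node': {'ATTR_id': '1', '#text': 'text'}}
print(xmltodict.parse(doc, xml_attribs=False))    # {'node': 'text'}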
def parse_nessus_file(nessus_file, protocol):
    targets = []

    def handle_nessus_file(path, item):
        # Must return True, otherwise xmltodict will throw a ParsingInterrupted() exception
        # https://github.com/martinblech/xmltodict/blob/master/xmltodict.py#L219
        tags = dict(path)
        if 'ReportHost' in tags and 'ReportItem' in tags:
            ip = tags['ReportHost']['name']
            if ip in targets:
                return True
            port = tags['ReportItem']['port']
            svc_name = tags['ReportItem']['svc_name']
            if port in protocol_dict[protocol]['ports']:
                targets.append(ip)
            if svc_name in protocol_dict[protocol]['services']:
                targets.append(ip)
            return True
        else:
            return True

    with open(nessus_file, 'r') as file_handle:
        xmltodict.parse(file_handle, item_depth=4, item_callback=handle_nessus_file)
    return targets
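# A minimal sketch of xmltodict's streaming mode as used above: items at
# item_depth are handed to the callback instead of being accumulated into
# one big dict in memory.
import xmltodict

xml = b"""<hosts>
  <host><name>a</name></host>
  <host><name>b</name></host>
</hosts>"""

names = []

def on_host(path, item):
    # path is the list of (tag, attrs) pairs from the root down to this element
    names.append(item['name'])
    return True  # returning False raises xmltodict.ParsingInterrupted

xmltodict.parse(xml, item_depth=2, item_callback=on_host)
print(names)  # ['a', 'b']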
import xmltodict
import json
from pprint import pprint

with open('data/buildings.xml', 'r') as content_file:
    buildings_content = content_file.read()
buildings_data = xmltodict.parse(buildings_content)

with open('data/buildings.xml', 'r') as content_file:
    libs_content = content_file.read()
libs_data = xmltodict.parse(libs_content)


def polygon_to_list(polygon):
    lst = []
    if type(polygon['outerBoundaryIs']) == list:
        for i in polygon['outerBoundaryIs']:
            new_lst = []
            for item in i['LinearRing']['coordinates'].split('\n'):
                coordinates = item.split(',')
                lat, lng = float(coordinates[1]), float(coordinates[0])
                new_lst.append([lat, lng])
            lst.append([new_lst])
    else:
        if len(polygon['outerBoundaryIs']['LinearRing']['coordinates'].split(
                '\n')) == 1:
            items = polygon['outerBoundaryIs']['LinearRing'][
def load(filename):
    # Get extension without the leading dot
    extension = pathlib.Path(filename).suffix[1:]
    if extension == 'json':
        import json
        with open(filename, 'r') as file:
            result = json.load(file)
    elif extension in ('yaml', 'yml'):
        import yaml
        with open(filename, 'r') as file:
            result = yaml.load(file, Loader=yaml.Loader)
    else:
        import xmltodict
        # Round-trip through json to turn the OrderedDict returned by
        # xmltodict.parse into a plain dict
        import json
        with open(filename, 'r') as file:
            result = xmltodict.parse(file.read())
            result = dict(result['root'])
            result = json.loads(json.dumps(result))

        # xmltodict adds extra nesting when parsing lists, so we still have
        # to massage the input manually.

        # If there are targets with empty properties, replace None value with {}
        if result.get('targets'):
            for target, targetval in result['targets'].items():
                if targetval is None:
                    result['targets'][target] = {}

        # If there are activities with empty values, replace None with {}
        if result.get('activities'):
            for activity in result['activities'].keys():
                if result['activities'][activity] is None:
                    result['activities'][activity] = {}

        # Transform objects with an 'item' key into a list of those objects
        if result.get('activities_conditions'):
            result['activities_conditions'] = result['activities_conditions']['item']
            if not isinstance(result['activities_conditions'], list):
                result['activities_conditions'] = [result['activities_conditions']]
            for condition in result['activities_conditions']:
                if not isinstance(condition['action_arguments']['item'], list):
                    condition['action_arguments'] = [condition['action_arguments']['item']]

        if result.get('targets_conditions'):
            result['targets_conditions'] = result['targets_conditions']['item']
            if not isinstance(result['targets_conditions'], list):
                result['targets_conditions'] = [result['targets_conditions']]
            for condition in result['targets_conditions']:
                if not isinstance(condition['action_arguments']['item'], list):
                    condition['action_arguments'] = [condition['action_arguments']['item']]

    return result
def get_hotel_info_increment(city_id, page_index=1):
    """Query incremental hotel-ID changes for a city, then call the static
    info endpoint for each changed hotel."""
    ping_url = 'http://{}/hotel/GetHotelInfoIncrement.asmx'.format(API_URL)
    ts = int(time.time())
    # start-end is a 15-minute window
    end = datetime.datetime.now() - datetime.timedelta(minutes=1)
    end = end.strftime("%Y-%m-%dT%H:%M:%S")
    start = datetime.datetime.now() - datetime.timedelta(minutes=16)
    start = start.strftime("%Y-%m-%dT%H:%M:%S")
    secret = hashlib.md5(KEY.encode('utf8')).hexdigest().upper()
    sign = hashlib.md5('{0}{1}{2}{3}GetHotelInfoIncrement'.format(
        ts, AID, secret, SID).encode('utf-8')).hexdigest().upper()
    request_xml = ('<?xml version="1.0" encoding="utf-8"?>'
                   '<soap:Envelope xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/">'
                   '<soap:Body>'
                   '<Request xmlns="http://ctrip.com/">'
                   '<requestXML><![CDATA['
                   '<?xml version="1.0" encoding="utf-8"?>'
                   '<Request>'
                   '<Header AllianceID="{0}" SID="{1}" TimeStamp="{2}" RequestType="GetHotelInfoIncrement" Signature="{3}" />'
                   '<GetHotelInfoIncrementRequest>'
                   '<StartTime>{4}</StartTime>'
                   '<EndTime>{5}</EndTime>'
                   '<CityID>{6}</CityID>'
                   '<PageSize>10</PageSize>'
                   '<PageIndex>{7}</PageIndex>'
                   '<CountryID>1</CountryID>'
                   '</GetHotelInfoIncrementRequest>'
                   '</Request>]]>'
                   '</requestXML>'
                   '</Request>'
                   '</soap:Body>'
                   '</soap:Envelope>').format(AID, SID, ts, sign, start, end,
                                              city_id, page_index)
    encode_xml = request_xml.encode('utf-8')
    headers = {
        'Content-Type': 'text/xml; charset=utf-8',
        'Host': 'openapi.ctrip.com',
        'Content-Length': len(encode_xml)
    }
    resp = requests.post(url=ping_url, data=encode_xml, headers=headers).text
    # unwrap the SOAP envelope, then convert the inner XML to a dict
    xmlt_data = xmltodict.parse(xmltodict.parse(resp)['soap:Envelope'][
        'soap:Body']['RequestResponse']['RequestResult'])
    xmlt_data = (json.loads(
        (json.dumps(xmlt_data, ensure_ascii=False, indent=2)).replace('@', '')))

    # robustness first: defensive nested lookup
    def judgment(keys, data=xmlt_data, default=[]):
        try:
            if isinstance(keys, str) and isinstance(data, dict):
                return data.get(keys, default)
            if isinstance(keys, list) and isinstance(data, dict):
                if not len(keys) == 1:
                    if judgment(keys[0], data=data):
                        return judgment(keys[1:], data=judgment(keys[0], data=data))
                else:
                    return judgment(keys[-1], data=data)
            else:
                return []
        except KeyError as e:
            return []

    access_count = judgment(['Response', 'Header', 'AccessCount'])
    hotel_ids = judgment(
        ['Response', 'GetHotelInfoIncrementResponse', 'HotelInfoChangeList', 'HotelID'])
    if hotel_ids:
        if not isinstance(hotel_ids, list):
            hotel_ids = [hotel_ids]
        get_hotel_info_increment.delay(city_id, page_index + 1)
        for hotel_ids_index in range(len(hotel_ids) // 10 + 1):
            hotel_ids_message_fetch.delay(
                hotel_ids[hotel_ids_index * 10:(hotel_ids_index + 1) * 10])
    return ['city', city_id, page_index, 'updating']
def hotel_ids_rateplans_fetch(hotel_ids):
    """Query the price calendar for each hotel ID and return the lowest price."""
    start = str(datetime.date.today())
    end = str(datetime.date.today() + datetime.timedelta(days=28))
    ping_url = 'http://{}/Hotel/OTA_HotelRatePlan.asmx'.format(API_URL)
    ts = int(time.time())
    secret = hashlib.md5(KEY.encode('utf8')).hexdigest().upper()
    sign = hashlib.md5('{0}{1}{2}{3}OTA_HotelRatePlan'.format(
        ts, AID, secret, SID).encode('utf-8')).hexdigest().upper()
    request_xml = ('<?xml version="1.0" encoding="utf-8"?>'
                   '<soap:Envelope xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/">'
                   '<soap:Body>'
                   '<Request xmlns="http://ctrip.com/">'
                   '<requestXML><![CDATA['
                   '<?xml version="1.0" encoding="utf-8"?>'
                   '<Request>'
                   '<Header AllianceID="{0}" SID="{1}" TimeStamp="{2}" RequestType="OTA_HotelRatePlan" Signature="{3}"/>'
                   '<HotelRequest>'
                   '<RequestBody xmlns:ns="http://www.opentravel.org/OTA/2003/05" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema">'
                   '<ns:OTA_HotelRatePlanRQ TimeStamp="{4}" Version="1.0">'
                   '<ns:RatePlans>'
                   "{rateplans}"
                   '</ns:RatePlans>'
                   '</ns:OTA_HotelRatePlanRQ>'
                   '</RequestBody>'
                   '</HotelRequest>'
                   '</Request>]]>'
                   '</requestXML>'
                   '</Request>'
                   '</soap:Body>'
                   '</soap:Envelope>')
    rateplan = ('<ns:RatePlan>'
                '<ns:DateRange Start="{start}" End="{end}"/>'
                '<ns:RatePlanCandidates>'
                '<ns:RatePlanCandidate AvailRatesOnlyInd="true" IsCNYCurrency="true" RatePlanCode=" " >'
                '<ns:HotelRefs>'
                '<ns:HotelRef HotelCode="{hotel_ids[index]}"/>'
                '</ns:HotelRefs>'
                '</ns:RatePlanCandidate>'
                '</ns:RatePlanCandidates>'
                '<ns:TPA_Extensions RestrictedDisplayIndicator="false" />'
                '</ns:RatePlan>')
    rateplans = ''
    n = len(hotel_ids)
    for index in range(n):
        rateplans += rateplan.replace('index', str(index))
    rateplans = rateplans.format(hotel_ids=hotel_ids, start=start, end=end)
    request_xml = request_xml.format(
        AID, SID, ts, sign,
        datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%f+08:00'),
        rateplans=rateplans)
    encode_xml = request_xml.encode('utf-8')
    headers = {'Content-Type': 'text/xml; charset=utf-8',
               'Host': 'openapi.ctrip.com',
               'Content-Length': len(encode_xml)}
    try:
        resp = requests.post(url=ping_url, data=encode_xml, headers=headers).text
        # unwrap the SOAP envelope, then convert the inner XML to a dict
        xmlt_data = xmltodict.parse(xmltodict.parse(resp)[
            'soap:Envelope']['soap:Body']['RequestResponse']['RequestResult'])
        xmlt_data = (json.loads(
            (json.dumps(xmlt_data, ensure_ascii=False, indent=2)).replace('@', '')))
        datas = xmlt_data['Response']['HotelResponse'][
            'OTA_HotelRatePlanRS']['RatePlans']
    except KeyError:
        hotel_ids_rateplans_fetch.delay(hotel_ids)
        return ['hotel_ids_rateplans_fetch retry']
    if isinstance(datas, list):
        for data in datas:
            rateplans_get_price_and_insert.delay(data)
    else:
        rateplans_get_price_and_insert.delay(datas)
    return ['rateplans fetch is OK']
def hotel_ids_message_fetch(hotel_ids):
    """Query static hotel information."""
    ping_url = 'http://{}/Hotel/OTA_HotelDescriptiveInfo.asmx'.format(API_URL)
    ts = int(time.time())
    secret = hashlib.md5(KEY.encode('utf8')).hexdigest().upper()
    sign = hashlib.md5('{0}{1}{2}{3}OTA_Ping'.format(
        ts, AID, secret, SID).encode('utf-8')).hexdigest().upper()
    request_xml = ('<?xml version="1.0" encoding="utf-8"?>'
                   '<soap:Envelope xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/">'
                   '<soap:Body>'
                   '<Request xmlns="http://ctrip.com/">'
                   '<requestXML><![CDATA['
                   '<?xml version="1.0" encoding="utf-8"?>'
                   '<Request>'
                   '<Header AllianceID="{0}" SID="{1}" TimeStamp="{2}" RequestType="OTA_Ping" Signature="{3}" />'
                   '<HotelRequest>'
                   '<RequestBody xmlns:ns="http://www.opentravel.org/OTA/2003/05" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema">'
                   '<OTA_HotelDescriptiveInfoRQ Version="1.0" xsi:schemaLocation="http://www.opentravel.org/OTA/2003/05OTA_HotelDescriptiveInfoRQ.xsd" xmlns="http://www.opentravel.org/OTA/2003/05" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">'
                   '<HotelDescriptiveInfos>'
                   "{4}"
                   '</HotelDescriptiveInfos>'
                   '</OTA_HotelDescriptiveInfoRQ>'
                   '</RequestBody>'
                   '</HotelRequest>'
                   '</Request>]]>'
                   '</requestXML>'
                   '</Request>'
                   '</soap:Body>'
                   '</soap:Envelope>')
    hotel_desc = ('<HotelDescriptiveInfo HotelCode="{0[index]}" PositionTypeCode="502">'
                  '<HotelInfo SendData="true"/>'
                  '<FacilityInfo SendGuestRooms="true"/>'
                  '<AreaInfo SendAttractions="true" SendRecreations="true"/>'
                  '<ContactInfo SendData="true"/>'
                  '<MultimediaObjects SendData="true"/>'
                  '</HotelDescriptiveInfo>')
    hotel_index = ''
    n = len(hotel_ids)
    if n == 0:
        return ['hotel_ids is []']
    for i in range(n):
        hotel_index += hotel_desc.replace('index', str(i))
    hotel_desc = hotel_index.format(hotel_ids)
    request_xml = request_xml.format(AID, SID, ts, sign, hotel_desc)
    encode_xml = request_xml.encode('utf-8')
    headers = {'Content-Type': 'text/xml; charset=utf-8',
               'Host': 'openapi.ctrip.com',
               'Content-Length': len(encode_xml)}
    try:
        resp = requests.post(url=ping_url, data=encode_xml, headers=headers).text
        # unwrap the SOAP envelope, then convert the inner XML to a dict
        xmlt_data = xmltodict.parse(xmltodict.parse(resp)[
            'soap:Envelope']['soap:Body']['RequestResponse']['RequestResult'])
        xmlt_data = (json.loads(
            (json.dumps(xmlt_data, ensure_ascii=False, indent=2)).replace('@', '')))
        datas = xmlt_data['Response']['HotelResponse']['OTA_HotelDescriptiveInfoRS'][
            'HotelDescriptiveContents']['HotelDescriptiveContent']
    except KeyError as e:
        hotel_ids_message_fetch.delay(hotel_ids)
        pprint(hotel_ids)
        return ['hotel_ids_message_fetch retries']
    if not isinstance(datas, list):
        datas = [datas]
    for data in datas:
        datas_purge_and_insert.delay(data)
    hotel_ids_rateplans_fetch.delay(hotel_ids)
    return ['message fetch is OK']
def main():
    with open(args.junitxml) as fd:
        host = socket.gethostname()
        doc = xmltodict.parse(fd.read())
    total_tests = int(doc['testsuite']['@tests'])
    elapsed_time = float(doc['testsuite']['@time'])
    log = {
        'errors': list(),
        'failures': list(),
        'skipped': list(),
        'success': list()
    }
    for testcase in doc['testsuite']['testcase']:
        logmsg = dict()
        logmsg['name'] = testcase['@name']
        logmsg['classname'] = testcase['@classname']
        logmsg['time'] = float(testcase['@time'])
        if 'error' in testcase:
            msgtype = 'errors'
            logmsg['message'] = testcase['error']['@message']
            logmsg['trace'] = testcase['error']['#text']
        elif 'failure' in testcase:
            msgtype = 'failures'
            logmsg['message'] = testcase['failure']['@message']
            logmsg['trace'] = testcase['failure']['#text']
        elif 'skipped' in testcase:
            msgtype = 'skipped'
            logmsg['message'] = testcase['skipped']['@message']
        else:
            msgtype = 'success'
        if 'system-out' in testcase:
            logmsg['system-out'] = testcase['system-out']
        if 'system-err' in testcase:
            logmsg['system-err'] = testcase['system-err']

        # cleanup of message, see also:
        # http://stackoverflow.com/questions/40012526/junitxml-and-pytest-difference-in-message-when-running-in-parallel
        if 'message' in logmsg:
            if 'AssertionError: ' in logmsg['message']:
                logmsg['message'] = re.sub(
                    r".*E +(AssertionError: .*) E +assert.*",
                    r"\1", logmsg['message'])
            if 'HTTPError: ' in logmsg['message']:
                logmsg['message'] = re.sub(
                    r".*E +(HTTPError: .*) [^ ]*: HTTPError.*",
                    r"\1", logmsg['message'])
            if 'OSError: ' in logmsg['message']:
                logmsg['message'] = re.sub(
                    r".*E +(OSError: .*) [^ ]*: OSError.*",
                    r"\1", logmsg['message'])

        # hide some private information
        for key in logmsg:
            if 'username' in str(logmsg[key]):
                logmsg[key] = re.sub(r"username = '[^']*'",
                                     "username = '******'", logmsg[key])
            if 'password' in str(logmsg[key]):
                logmsg[key] = re.sub(r"password = '[^']*'",
                                     "password = '******'", logmsg[key])
            if 'api_key' in str(logmsg[key]):
                logmsg[key] = re.sub(r"api_key = '[^']*'",
                                     "api_key = 'api_key'", logmsg[key])
            if 'api_token' in str(logmsg[key]):
                logmsg[key] = re.sub(r"api_token = '[^']*'",
                                     "api_token = 'api_token'", logmsg[key])
        log[msgtype].append(logmsg)

    mongoid = report_mongo(host, total_tests, elapsed_time, log)
    report_console(host, total_tests, elapsed_time, log, mongoid)
    report_email(host, total_tests, elapsed_time, log, mongoid)
        SetVar(var_, datos)
    except Exception as e:
        PrintException()
        raise e

if module == "xml2Dict":
    path = GetParams('path')
    var_ = GetParams('result')
    encoding = GetParams('encoding')
    try:
        if not encoding:
            encoding = "latin-1"
        with open(path, encoding=encoding) as fd:
            doc = xmltodict.parse(fd.read())
        # doc = eval(str(json.dumps(doc)).replace("null", '""').replace("false", "False").replace("true", "True"))
        SetVar(var_, json.loads(json.dumps(doc)))
    except Exception as e:
        PrintException()
        raise e

if module == "xml_str2Dict":
    xml = GetParams('xml')
    result = GetParams('result')
    try:
        doc = xmltodict.parse(xml)
        SetVar(result, json.dumps(doc))
    except Exception as e:
        PrintException()
def convert_game_value(onefile, feature_list, pgn2value):
    doc = xmltodict.parse(open(onefile, encoding='utf-8').read())
    fen = doc['ChineseChessRecord']["Head"]["FEN"]
    if pgn2value is not None:
        pgnfile = doc['ChineseChessRecord']["Head"]["From"]
    moves = [
        i["@value"] for i in doc['ChineseChessRecord']['MoveList']["Move"]
        if i["@value"] != '00-00'
    ]
    bb = BaseChessBoard(fen)
    if pgn2value is not None:
        val = pgn2value[pgnfile]
    else:
        place = onefile.split('.')[-2].split('_')[-1]
        if place == 'w':
            val = 1
        elif place == 'b':
            val = -1
        else:
            val = 0
    red = False
    for i in moves:
        red = not red
        x1, y1, x2, y2 = int(i[0]), int(i[1]), int(i[3]), int(i[4])
        boardarr = bb.get_board_arr()
        # chess picker features
        picker_x = []
        if red:
            for one in feature_list['red']:
                picker_x.append(np.asarray(boardarr == one, dtype=np.uint8))
            for one in feature_list['black']:
                picker_x.append(np.asarray(boardarr == one, dtype=np.uint8))
        else:
            for one in feature_list['black']:
                picker_x.append(np.asarray(boardarr == one, dtype=np.uint8))
            for one in feature_list['red']:
                picker_x.append(np.asarray(boardarr == one, dtype=np.uint8))
        picker_x = np.asarray(picker_x)
        picker_y = np.asarray([
            board.x_axis[x1],
            board.y_axis[y1],
            board.x_axis[x2],
            board.y_axis[y2],
        ])
        # vertically mirrored move, used for the black-to-move case
        picker_y_rev = np.asarray([
            board.x_axis[x1],
            board.y_axis[9 - y1],
            board.x_axis[x2],
            board.y_axis[9 - y2],
        ])
        if red:
            yield picker_x, picker_y, val
        else:
            yield picker_x[:, ::-1, :], picker_y_rev, -val
        moveresult = bb.move(Pos(x1, y1), Pos(x2, y2))
        assert moveresult is not None
def test_pagseguro():
    initialize()
    c = SmartClient()
    default_product = get_default_product()

    basket_path = reverse("shuup:basket")
    c.post(basket_path, data={
        "command": "add",
        "product_id": default_product.pk,
        "quantity": 1,
        "supplier": get_default_supplier().pk
    })

    shipping_method = get_default_shipping_method()
    processor = get_payment_provider()
    payment_method = processor.create_service(
        PagSeguroPaymentMethod.ONLINE_DEBIT.value,
        identifier="pagseguro_debit",
        shop=get_default_shop(),
        name="debit",
        enabled=True,
        tax_class=get_default_tax_class())

    addr = MutableAddress.from_data(dict(
        name=u"Dog Hello",
        suffix=", Esq.",
        postal_code="89999-999",
        street="Woof Ave.",
        city="Dog Fort",
        country="GB",
        phone="47 98821-2231",
    ))
    patch("shuup.testing.factories.get_address", new_callable=lambda: addr)

    addresses_path = reverse("shuup:checkout", kwargs={"phase": "addresses"})
    methods_path = reverse("shuup:checkout", kwargs={"phase": "methods"})
    payment_path = reverse("shuup:checkout", kwargs={"phase": "payment"})
    confirm_path = reverse("shuup:checkout", kwargs={"phase": "confirm"})

    addresses_soup = c.soup(addresses_path)
    inputs = fill_address_inputs(addresses_soup, with_company=False)
    c.post(addresses_path, data=inputs)
    c.post(methods_path, data={
        "payment_method": payment_method.pk,
        "shipping_method": shipping_method.pk
    })
    c.get(confirm_path)
    c.soup(payment_path)
    c.post(payment_path, {
        "paymentMethod": PagSeguroPaymentMethodIdentifier.Debito,
        "bankOption": "{0}".format(PagSeguroPaymentMethodCode.DebitoOnlineBB.value),
        "senderHash": "J7E98Y37WEIRUHDIAI9U8RYE7UQE"
    })
    confirm_soup = c.soup(confirm_path)
    c.post(confirm_path, data=extract_form_fields(confirm_soup))

    order = Order.objects.filter(payment_method=payment_method).first()
    USER_MODEL = get_user_model()
    order.phone = "47 98821-2231"
    order.creator = get_user_model().objects.create_user(**{
        USER_MODEL.USERNAME_FIELD: "*****@*****.**",
        "password": "******"
    })
    order.save()

    pagseguro = PagSeguro.create_from_config(get_pagseguro_config())

    # unpatch methods
    patch.stopall()

    # Test: get_session_id - SUCCESS
    session_xml_fake = """<session><id>WOEIFJE9IUREI29RU8</id></session>"""
    response = Mock()
    response.status_code = 200
    response.content = session_xml_fake
    with patch.object(requests, 'post', return_value=response):
        session_id = xmltodict.parse(session_xml_fake)['session']['id']
        assert pagseguro.get_session_id() == session_id

    # Test: get_session_id - EXCEPTION
    response = Mock()
    response.status_code = 500
    response.content = ERROR_XML
    with patch.object(requests, 'post', return_value=response):
        with pytest.raises(PagSeguroException) as exc:
            pagseguro.get_session_id()
        assert exc.value.status_code == 500
        assert exc.value.error == ERROR_XML

    # Test: get_notification_info - SUCCESS
    transaction_xml_fake = """<transaction><date>WOEIFJE9IUREI29RU8</date></transaction>"""
    response = Mock()
    response.status_code = 200
    response.content = transaction_xml_fake
    with patch.object(requests, 'get', return_value=response):
        assert pagseguro.get_notification_info("XXXX") == xmltodict.parse(
            transaction_xml_fake)

    # Test: get_notification_info - EXCEPTION
    response = Mock()
    response.status_code = 500
    response.content = ERROR_XML
    with patch.object(requests, 'get', return_value=response):
        with pytest.raises(PagSeguroException) as exc:
            pagseguro.get_notification_info("ZZZZ")
        assert exc.value.status_code == 500
        assert exc.value.error == ERROR_XML

    # Test: get_transaction_info - SUCCESS
    transaction_xml_fake = """<transaction><date>WOEIFJE9IUREI29RU8</date></transaction>"""
    response = Mock()
    response.status_code = 200
    response.content = transaction_xml_fake
    with patch.object(requests, 'get', return_value=response):
        assert pagseguro.get_transaction_info("XXXX") == xmltodict.parse(
            transaction_xml_fake)

    # Test: get_transaction_info - EXCEPTION
    response = Mock()
    response.status_code = 500
    response.content = ERROR_XML
    with patch.object(requests, 'get', return_value=response):
        with pytest.raises(PagSeguroException) as exc:
            pagseguro.get_transaction_info("ZZZZ")
        assert exc.value.status_code == 500
        assert exc.value.error == ERROR_XML

    # Test: pay - SUCCESS
    response = Mock()
    response.status_code = 200
    response.content = TRANSACTION_XML
    with patch.object(requests, 'post', return_value=response):
        parsed_xml = xmltodict.parse(TRANSACTION_XML)
        result = pagseguro.pay(payment_method, order)
        assert isinstance(result, PagSeguroPaymentResult)
        assert result.error is False
        assert result._data == parsed_xml
        assert result.code == parsed_xml['transaction']['code']
        assert result.payment_link == parsed_xml['transaction']['paymentLink']

    # Test: pay - ERROR
    response = Mock()
    response.status_code = 400
    response.content = ERROR_XML
    with patch.object(requests, 'post', return_value=response):
        parsed_xml = xmltodict.parse(ERROR_XML)
        result = pagseguro.pay(payment_method, order)
        assert isinstance(result, PagSeguroPaymentResult)
        assert result.error is True
        assert result._data == parsed_xml
        assert result.errors == parsed_xml['errors']

    # Test: pay - EXCEPTION
    response = Mock()
    response.status_code = 500
    response.content = "A BIG MISTAKE"
    with patch.object(requests, 'post', return_value=response):
        with pytest.raises(PagSeguroException) as exc:
            pagseguro.pay(payment_method, order)
        assert exc.value.status_code == 500
        assert exc.value.error == "A BIG MISTAKE"

    def get_only_nums(value):
        return "".join([c for c in value if c.isdigit()])

    # Test: _get_payment_xml with CPF
    order.creator.pf_person = PFPerson()
    payment_xml = xmltodict.parse(
        pagseguro._get_payment_xml(payment_method, order))
    assert payment_xml['payment']['currency'] == 'BRL'
    assert payment_xml['payment']['method'] == PagSeguroPaymentMethodIdentifier.Debito
    assert payment_xml['payment']['sender']['documents']['document']['type'] == 'CPF'
    assert payment_xml['payment']['sender']['documents']['document']['value'] == \
        get_only_nums(PFPerson().cpf)

    # Test: _get_payment_xml with CNPJ
    delattr(order.creator, 'pf_person')
    order.creator.pj_person = PJPerson()
    payment_xml = xmltodict.parse(
        pagseguro._get_payment_xml(payment_method, order))
    assert payment_xml['payment']['sender']['documents']['document']['type'] == 'CNPJ'
    assert payment_xml['payment']['sender']['documents']['document']['value'] == \
        get_only_nums(PJPerson().cnpj)
    assert payment_xml['payment']['sender']['phone']['areaCode'] == "47"
    assert payment_xml['payment']['sender']['phone']['number'] == "988212231"
        # worker_results = mturk.list_assignments_for_hit(HITId=row[2])
        # assignment_id = worker_results['Assignments']['AssignmentId']
        # mturk.reject_assignment(AssignmentId=assignment_id, RequesterFeedback="No code provided")
        # c.execute("DELETE from hits WHERE unique_code_1=?", (row[4],))

        # if status is 1 (meaning we have sent back email to user)
        if row[3] == 1:
            print "status is 1"
            hit_id = row[2]
            worker_results = mturk.list_assignments_for_hit(HITId=hit_id)
            print worker_results
            if worker_results['NumResults'] > 0:
                # for each assignment ID in the corresponding HIT ID
                for assignment in worker_results['Assignments']:
                    assignment_id = assignment['AssignmentId']
                    xml_doc = xmltodict.parse(assignment['Answer'])
                    turk_response = xml_doc['QuestionFormAnswers']['Answer']['FreeText']
                    # check to see if the response from the user equals the code sent
                    if turk_response == new_code:
                        # if code_2 cipher code_1 works, mark the assignment as
                        # complete and delete it from the db
                        mturk.approve_assignment(AssignmentId=assignment_id)
                        c.execute("DELETE from hits WHERE unique_code_1=?", (row[4],))
                    else:
                        mturk.reject_assignment(AssignmentId=assignment_id,
                                                RequesterFeedback="Incorrect code provided")
                        c.execute("DELETE from hits WHERE unique_code_1=?", (row[4],))
            else:
                print "no responses currently"

    conn.commit()
    c.close()
    conn.close()
    print "finished"
import socket
import itertools as it
import ConfigParser
from urllib import urlencode
from datetime import datetime

import xmltodict
from amara.thirdparty import json
from amara.lib.iri import is_absolute
from dplaingestion.selector import exists
from dplaingestion.selector import setprop
from dplaingestion.selector import getprop as get_prop
from dplaingestion.utilities import iterify, couch_id_builder


def getprop(obj, path):
    return get_prop(obj, path, keyErrorAsNone=True)


XML_PARSE = lambda doc: xmltodict.parse(doc, xml_attribs=True, attr_prefix='',
                                        force_cdata=False,
                                        ignore_whitespace_cdata=True)


class Fetcher(object):
    """The base class for all fetchers.
    Includes attributes and methods that are common to all types.
    """
    def __init__(self, profile, uri_base, config_file):
        """Set common attributes"""
        self.uri_base = uri_base
        self.config = ConfigParser.ConfigParser()
        with open(config_file, "r") as f:
            self.config.readfp(f)

        # Which OAI sets get added as collections
        self.sets = profile.get("sets")
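# A small sketch of what the stock xmltodict options above do (note that
# ignore_whitespace_cdata appears to come from a patched copy of xmltodict
# shipped with dplaingestion, not the upstream library):
import xmltodict

print(xmltodict.parse('<a href="x">1</a>', xml_attribs=True, attr_prefix=''))
# -> {'a': {'href': 'x', '#text': '1'}}  (attributes kept, no '@' prefix)
print(xmltodict.parse('<a><b>1</b></a>', force_cdata=True))
# -> {'a': {'b': {'#text': '1'}}}  (text always wrapped in '#text')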
import glob
import os

import xmltodict

os.makedirs("data/harvest_LG_AI", exist_ok=True)

filenames = glob.glob("data/harvest/*.xml")
for filename in filenames:
    with open(filename, "r") as f:
        record_xml = f.read()
    if len(record_xml) < 10:  # must be a bad record
        print(f"Bad record {filename}")
        continue
    record_dict = xmltodict.parse(
        record_xml, process_namespaces=False)['record']['metadata']['arXiv']
    categories = record_dict['categories']
    if categories.find("cs.LG") == -1 and categories.find("cs.AI") == -1:
        continue
    new_filename = f"./data/harvest_LG_AI/{os.path.basename(filename)}"
    # print(f"move {filename}, {new_filename}")
    os.rename(filename, new_filename)
    # break
def xmlProxy(originalXMLWebserviceURL):
    decodedURL = urllib2.unquote(originalXMLWebserviceURL)
    f = urllib2.urlopen(decodedURL)
    xml = f.read()
    parsedXML = xmltodict.parse(xml)
    return json.dumps(parsedXML)
img_dir = system["monk_img_dir"]; elif(system["anno_type"] == "voc"): root_dir = system["voc_root_dir"]; img_dir = system["voc_img_dir"]; anno_dir = system["voc_anno_dir"]; files = os.listdir(root_dir + "/" + anno_dir); combined = []; for i in tqdm(range(len(files))): annoFile = root_dir + "/" + anno_dir + "/" + files[i]; f = open(annoFile, 'r'); my_xml = f.read(); anno = dict(dict(xmltodict.parse(my_xml))["annotation"]) fname = anno["filename"]; label_str = ""; if(type(anno["object"]) == list): for j in range(len(anno["object"])): obj = dict(anno["object"][j]); label = anno["object"][j]["name"]; bbox = dict(anno["object"][j]["bndbox"]) x1 = bbox["xmin"]; y1 = bbox["ymin"]; x2 = bbox["xmax"]; y2 = bbox["ymax"]; if(j == len(anno["object"])-1): label_str += x1 + " " + y1 + " " + x2 + " " + y2 + " " + label; else: label_str += x1 + " " + y1 + " " + x2 + " " + y2 + " " + label + " ";
    'username': config['username'],
    'password': config['password'],
}

pub_topic = '/home/gwl/bms/'
url = 'http://' + config['cpm_url'] + '/bcc.xml'

while True:
    try:
        with eventlet.Timeout(10):
            response = requests.get(url, verify=False)
    except:
        # print("Exception \n")
        time.sleep(60)
        # print("Exception next \n")
    else:
        dict_data = xmltodict.parse(response.content)
        d = dict_data['data']
        for key in d:
            # print(key + " " + dict_data['data'][key])
            if dict_data['data'][key] != 'N/A':
                try:
                    publish.single(pub_topic + key,
                                   str(dict_data['data'][key]),
                                   hostname=config['mqtt_host'],
                                   port=config['mqtt_port'],
                                   retain=1, qos=2, auth=auth, tls={})
                except:
def _query_and_parse(self, url):
    t = time.time()
    response = requests.get(url)
    time.sleep(max(self.throttle - (time.time() - t), 0))  # simple throttling
    return xmltodict.parse(response.content)
# pip install xmltodict
import xmltodict

with open("artikelen.xml", "r") as f:
    xmldict = xmltodict.parse(f.read())

for artikel in xmldict["artikelen"]["artikel"]:
    print(artikel["naam"])
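# The reverse direction, for completeness: xmltodict.unparse() turns a
# single-rooted dict back into XML, and the two round-trip (the sample
# data here is hypothetical):
import xmltodict

data = {'artikelen': {'artikel': [{'naam': 'foo'}, {'naam': 'bar'}]}}
xml = xmltodict.unparse(data, pretty=True)
print(xml)
assert xmltodict.parse(xml) == data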
def get_tree(self) -> Tree:
    raw_xml = self.adb.dump_ui()
    xml_dict = xmltodict.parse(raw_xml, encoding="utf-8")
    c = Compiler()
    return c.compile2tree(xml_dict)
#!/usr/bin/env python
import xmltodict

with open("show_security_zones.xml", "r") as xml_file:
    my_xml = xmltodict.parse(xml_file.read().strip())

print(my_xml)
print(type(my_xml))

n = 1
for elem in my_xml['zones-information']['zones-security']:
    print("Security Zone #" + str(n) + ": " + elem['zones-security-zonename'])
    n += 1
# load context
ctx = util.loadContext(path="/geni-context.json",
                       key_passphrase=os.environ['GENI_KEY_PASSPHRASE'])

# create slice
util.createSlice(ctx, slice_name, 300, True)

# identify the cluster
cluster = str(os.environ['NODE_CLUSTER'])
if cluster == 'utah':
    cluster = cloudlab.Utah
elif cluster == 'wisconsin':
    cluster = cloudlab.Wisconsin
elif cluster == 'clemson':
    cluster = cloudlab.Clemson

# create sliver
manifest = util.createSliver(ctx, cluster, slice_name, request)
dict_response = xmltodict.parse(manifest.text)
node_ip_list = list()
for idx in range(0, len(dict_response['rspec']['node'])):
    node_ip_list.append(str(dict_response['rspec']['node'][idx]['host']['@ipv4']))

with open('/workspace/geni/hosts', 'w') as f:
    f.write(' '.join(node_ip_list))
if not os.path.exists(xml_output_path):
    print("Creating directory {}".format(xml_output_path))
    os.makedirs(xml_output_path)

# create an empty dataframe
df = pd.DataFrame(columns=('timestamp', 'tmp2m', 'weather'))

# fetch weather data
for hour in range(hours):
    url = 'http://vrijeme.hr/tablice/hrvatska_n_{hour:02}.xml'.format(hour=hour)
    response = req.get(url)
    response.raise_for_status()
    data = response.content
    doc = xmltodict.parse(data)

    current_date = doc['Hrvatska']['DatumTermin']['Datum']
    current_time = doc['Hrvatska']['DatumTermin']['Termin']
    current_date_time = current_date + '_' + current_time
    current_date_time = dt.datetime.strptime(current_date_time, "%d.%m.%Y_%H")
    current_date_time = current_date_time.replace(tzinfo=local_tz)
    current_unix_time = int(to_timestamp(current_date_time))
    print('date =', current_date, ', time =', current_time,
          ', timestamp =', current_unix_time)

    xml_filename = "{path}{timestamp}.xml".format(path=xml_output_path,
                                                  timestamp=current_unix_time)
    # response.content is bytes, so write in binary mode
    with open(xml_filename, "wb") as out_xml:
        out_xml.write(data)
def general_setting_handler(self, request):
    json_file = self.navigator.get_katana_dir() + os.sep + 'config.json'
    w_settings = self.navigator.get_warrior_dir() + 'Tools' + os.sep + 'w_settings.xml'
    elem_file = xml_controler.parse(w_settings)
    elem_file = elem_file.getroot()
    elem = self.search_by_name('def_dir', elem_file)
    def_dir_string = xml_controler.tostring(elem)
    def_dir_xml_obj = elem

    if request.method == 'POST':
        w_settings_data = {'Setting': {'Logsdir': '', 'Resultsdir': '', '@name': ''}}
        returned_json = json.loads(request.POST.get('data'))
        if os.environ["pipmode"] == 'True':
            if os.path.isdir(returned_json[0]['pythonsrcdir']):
                if os.path.split(returned_json[0]['pythonsrcdir'])[-1] == 'Warriorspace' or \
                        os.path.split(returned_json[0]['pythonsrcdir'])[-2] == 'Warriorspace':
                    returned_json[0]['pythonsrcdir'] = returned_json[0]['pythonsrcdir']
                else:
                    try:
                        os.mkdir(returned_json[0]['pythonsrcdir'] + '/Warriorspace')
                        returned_json[0]['pythonsrcdir'] = \
                            returned_json[0]['pythonsrcdir'] + '/Warriorspace'
                    except FileExistsError:
                        returned_json[0]['pythonsrcdir'] = \
                            returned_json[0]['pythonsrcdir'] + '/Warriorspace'
            else:
                return
        ref = {
            'xmldir': 'Testcases',
            'testsuitedir': 'Suites',
            'projdir': 'Projects',
            'idfdir': 'Data',
            'testdata': 'Config_files',
            'testwrapper': 'wrapper_files'
        }
        for k, v in list(w_settings_data['Setting'].items()):
            w_settings_data['Setting'][k] = returned_json[0][k]
            del returned_json[0][k]
        if os.environ["pipmode"] == 'True':
            for key, value in returned_json[0].items():
                if key in ref.keys() and returned_json[0]['pythonsrcdir'] != "" \
                        and returned_json[0][key] == "":
                    returned_json[0][key] = returned_json[0]['pythonsrcdir'] + '/' + ref[key]
                    if not os.path.isdir(returned_json[0][key]):
                        os.mkdir(returned_json[0][key])
        elem_file.remove(def_dir_xml_obj)
        val = xmltodict.unparse(w_settings_data, pretty=True)
        elem_file.insert(0, xml_controler.fromstring(val))
        with open(w_settings, 'wb') as f:
            f.write(xml_controler.tostring(elem_file))
        with open(json_file, 'w') as f:
            f.write(json.dumps(returned_json[0], indent=4, separators=(',', ': ')))
    else:
        with open(json_file, 'r') as f:
            json_data = json.load(f)
        if os.environ["pipmode"] == 'True':
            pythonsrcdir = read_json_data(json_file)['pythonsrcdir']
        else:
            pythonsrcdir = self.navigator.get_warrior_dir()
        data = {
            'fromXml': xmltodict.parse(def_dir_string).get('Setting'),
            'fromJson': validate_config_json(json_data, self.navigator.get_warrior_dir())
        }
        return data
def post(self):
    xml_data = xmltodict.parse(request.get_data())
    return dict(xml_data)
def add_certs_to_nsx_edge(nsx_edges_dir, nsx_edge, cert_section):
    map_nsx_esg_id([nsx_edge])
    cert_config = cert_section.get('config')
    if not cert_config:
        print('No certs section to use an available cert or generate cert was '
              'specified for cert: {} for edge instance: {}'.format(
                  cert_section['name'], nsx_edge['name']))
        raise Exception('Creation of NSX Edge failed, no certs section was provided')

    if cert_config.get('cert_id'):
        print('Going to use available cert id: {} from its cert_config for edge '
              'instance: {}'.format(cert_config['cert_id'], nsx_edge['name']))
        return

    if cert_config.get('key') and cert_config.get('cert'):
        print('Using the provided certs and key for associating with NSX Edge '
              'instance: {}'.format(nsx_edge['name']))
        cert_config['key'] = cert_config.get('key').strip() + '\n'
        cert_config['cert'] = cert_config.get('cert').strip() + '\n'
    else:
        # Try to generate certs if key and cert are not provided
        generate_certs(cert_section)

    certPayloadFile = os.path.join(nsx_edges_dir,
                                   cert_section['name'] + '_cert_post_payload.xml')
    template_dir = '.'
    nsx_cert_context = {
        'certs': cert_section,
        'files': []
    }
    template.render(certPayloadFile,
                    os.path.join(template_dir, 'edge_cert_post_payload.xml'),
                    nsx_cert_context)

    retry = True
    while retry:
        retry = False
        post_response = client.post_xml(NSX_URLS['cert']['all'] + '/' + nsx_edge['id'],
                                        certPayloadFile, check=False)
        data = post_response.text
        if DEBUG:
            print('NSX Edge Cert {} addition response:\n{}\n'.format(
                cert_section['name'], data))

        if post_response.status_code < 400:
            certPostResponseDoc = xmltodict.parse(data)
            certId = certPostResponseDoc['certificates']['certificate']['objectId']
            cert_section['cert_id'] = certId
            print('Added `{}` cert with id: `{}` to NSX Edge: `{}`\n'.format(
                cert_section['name'], cert_section['cert_id'], nsx_edge['name']))
            return certId
        elif post_response.status_code == 404:
            print('NSX Edge {} not yet up, retrying!!'.format(nsx_edge['name']))
            retry = True
            print('Going to retry addition of cert {} again... for NSX Edge: {}\n'.format(
                cert_section['name'], nsx_edge['name']))
        else:
            print('Addition of NSX Edge Cert {} failed, details:\n{}\n'.format(
                cert_section['name'], data))
            raise Exception('Addition of NSX Edge Cert `{}` failed, details:\n {}'.format(
                cert_section['name'], data))
def get_events():
    with open(path.join(data_folder, "events.xml"), encoding="UTF-8") as events_file:
        events = xmltodict.parse(events_file.read())['events']['event']
    return Response(json.dumps(events, cls=DateAwareEncoder),
                    mimetype='application/json')
import xmltodict
import yaml

with open('projects.xml', 'r') as project_xml_file:
    # Use dict_constructor=dict so we don't get ordered dicts; we don't
    # really care about ordering
    parsed = xmltodict.parse(project_xml_file.read(), dict_constructor=dict)


# Represent multiline strings as YAML block scalars so they look nicer
def str_presenter(dumper, data):
    if len(data.splitlines()) > 1:  # check for multiline string
        return dumper.represent_scalar('tag:yaml.org,2002:str', data, style='|')
    return dumper.represent_scalar('tag:yaml.org,2002:str', data)


yaml.add_representer(str, str_presenter)

for project in parsed['Projects']['Project']:
    print("Would create file: {0}.yaml".format(project['Name']))
    serialized = yaml.safe_dump(project, encoding='utf-8', default_flow_style=False)
    print(serialized.decode())
    with open("projects/{0}.yaml".format(project['Name']), 'w') as f:
        f.write(serialized.decode())
from ncclient import manager
import xml.dom.minidom
import xmltodict

m = manager.connect(host="192.168.56.101", port=830, username="******",
                    password="******", hostkey_verify=False)

netconf_filter = """
<filter>
    <interfaces-state xmlns="urn:ietf:params:xml:ns:yang:ietf-interfaces"/>
</filter>
"""

netconf_reply = m.get(filter=netconf_filter)
print(xml.dom.minidom.parseString(netconf_reply.xml).toprettyxml())

netconf_reply_dict = xmltodict.parse(netconf_reply.xml)
for interface in netconf_reply_dict["rpc-reply"]["data"]["interfaces-state"]["interface"]:
    print("Name: {} MAC: {} Input: {} Output: {}".format(
        interface["name"], interface["phys-address"],
        interface["statistics"]["in-octets"],
        interface["statistics"]["out-octets"]))
from imdb import IMDb
import xmltodict
import json

ia = IMDb()

movies = open("storage/my_movies.txt", "r").read().split('\n')
all_movies = []
json_to_create = open("storage/my_movies.json", "w")

x = 0
for m in movies:
    try:
        int(m[2:9])
        ia_movie = ia.get_movie(m[2:9]).asXML()
        o = xmltodict.parse(ia_movie)
        o["my_rating"] = m[15:16]
        all_movies.append(o)
        x = x + 1
        print(x)
    except ValueError:
        print("Oops! That was no valid number. Try again!")

print(len(all_movies))
obj = {"movies": all_movies}
json_to_create.write(json.dumps(obj))
import xmltodict

# Get the XML file data from a stream
stream = open('sample.xml', 'r')

# Parse the XML file into an OrderedDict
xml = xmltodict.parse(stream.read())

for e in xml["People"]["Person"]:
    print(e)
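# Caveat seen in several snippets above: xmltodict returns a single dict,
# not a one-element list, when a tag occurs once, so the loop above would
# iterate over dict keys for a file with exactly one Person. force_list
# avoids the singleton special case:
import xmltodict

one = '<People><Person><name>a</name></Person></People>'
print(xmltodict.parse(one)['People']['Person'])
# -> {'name': 'a'} (a dict, not a list)
print(xmltodict.parse(one, force_list=('Person',))['People']['Person'])
# -> [{'name': 'a'}] (always a list)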
'''
Simple Billing Program

Created on 15/03/2019

@author: Terence
'''
import xmltodict
import urllib.request

fileIn = urllib.request.urlopen(
    'http://personal.cityu.edu.hk/~dcywchan/1718SemB121COM/client.xml')
byteStr = fileIn.read()
lines = byteStr.decode('utf-8')
dl = xmltodict.parse(lines)

clientDT = ['Name', 'Address', 'Balance']
clientDL = []
newdl = []
for e in dl['root']['item']:
    tmpdl = {}
    for key, value in e.items():
        if key != "@type":
            value = value["#text"]
            tmpdl.update({key: value})
    newdl.append(tmpdl)
def test_generator(self):
    obj = {'a': {'b': ['1', '2', '3']}}

    def lazy_obj():
        return {'a': {'b': (i for i in ('1', '2', '3'))}}

    self.assertEqual(obj, parse(unparse(lazy_obj())))
    self.assertEqual(unparse(lazy_obj()),
                     unparse(parse(unparse(lazy_obj()))))