def write(path, doc, unix=False):
    """Serialize *doc* to XML and write it to *path*.

    :param path: destination file path
    :param doc: xmltodict-style document; ``doc['anime-list']['anime']`` is a
        mapping whose values become repeated ``<anime>`` elements
    :param unix: when True keep LF line endings; otherwise convert to CRLF
    """
    # Convert items to list (wrapped in list() so this also works on
    # Python 3, where .values() returns a non-list view)
    doc['anime-list']['anime'] = list(doc['anime-list']['anime'].values())

    # Encode document
    buf = StringIO()
    unparse(
        doc,
        output=buf,
        indent=' ',
        newl='\n',
        pretty=True,
        short_empty_elements=True
    )

    # Convert to string
    data = buf.getvalue() + '\n'

    # BUG FIX: the original converted to CRLF when unix=True, which is
    # backwards -- CRLF is the *non*-unix convention.
    if not unix:
        data = data.replace('\n', '\r\n')

    # Write data to path
    with open(path, 'w') as fp:
        fp.write(data)
def test_multiple_roots_nofulldoc(self):
    """With full_document=False, unparse may emit multiple root elements."""
    # Two distinct top-level keys become two sibling roots.
    pairs = OrderedDict((('a', 1), ('b', 2)))
    self.assertEqual(unparse(pairs, full_document=False),
                     '<a>1</a><b>2</b>')
    # A top-level list becomes repeated sibling roots of the same name.
    repeated = {'a': [1, 2]}
    self.assertEqual(unparse(repeated, full_document=False),
                     '<a>1</a><a>2</a>')
def export_project(self):
    """Generate Visual Studio project files for this workspace.

    Fills a working copy of the workspace dict with vcxproj and debugger
    data, emits the project files plus the NMake/debugger XAML assets, and
    returns the generated-project output structure.
    """
    output = copy.deepcopy(self.generated_project)
    expanded_dic = self.workspace.copy()

    # data for .vcxproj
    # (removed the dead `expanded_dic['vcxproj'] = {}` pre-assignment the
    # original had -- it was immediately overwritten)
    expanded_dic['vcxproj'] = self._set_vcxproj(expanded_dic['name'])

    # data for debugger for pyOCD; the placeholder entry is kept because
    # templates may reference it -- the real options live in vcxproj_user_dic
    expanded_dic['vcxproj_user'] = {}
    # TODO: localhost and gdb should be misc for VS ! Add misc options
    vcxproj_user_dic = self._set_vcxproj_user(
        'localhost:3333',
        'arm-none-eabi-gdb',
        os.path.join(expanded_dic['build_dir'], expanded_dic['name']),
        os.path.join(os.getcwd(), expanded_dic['output_dir']['path']))

    self._set_groups(expanded_dic)

    # Project files
    project_path, output = self._generate_vcxproj_files(
        expanded_dic, expanded_dic['name'],
        expanded_dic['output_dir']['path'], vcxproj_user_dic)

    # NMake and debugger assets
    # TODO: not sure about base class having NMake and debugger. We might
    # want to disable that by default?
    self.gen_file_raw(xmltodict.unparse(self.linux_nmake_xaml, pretty=True),
                      'linux_nmake.xaml', expanded_dic['output_dir']['path'])
    self.gen_file_raw(xmltodict.unparse(self.linux_debugger_xaml, pretty=True),
                      'LocalDebugger.xaml', expanded_dic['output_dir']['path'])
    return output
def make_qbxml(self, query=None, payload=None):
    """
    Outputs a valid QBXML; if there is a payload it is included in the output.

    :param query: full name of the request object, e.g. ``CustomerAddRq``
    :param payload: optional payload; required when adding items
    """
    # With no payload the query element is emitted empty.
    qb_request = payload if payload else None
    qbxml_query = {
        'QBXML': {
            'QBXMLMsgsRq': {
                '@onError': "stopOnError",
                query: qb_request
            }
        }
    }
    # NOTE(review): the original also computed
    # self.xml_soap(xmltodict.unparse(...)) into data_xml and immediately
    # overwrote it; that dead call is removed here. Confirm that SOAP
    # wrapping was not the intended output.
    data_xml = xmltodict.unparse(qbxml_query, full_document=False)
    res = self.xml_prefix + data_xml
    return res
def StaticDiscovery():
    """Associate with the meter, then for every entry in OBISList read the
    value attribute (2) and a class-specific attribute, and dump the
    combined result to Objects/<obis>.xml."""
    p = Associate()
    # interface class -> attribute index read in the first request
    # (classes 4 and 5 use attribute 4, the others attribute 3)
    ATTR_FOR_IC = {1: 3, 3: 3, 4: 4, 5: 4, 8: 3}
    for obisInfo in OBISList:
        a, b, c, d, e, f = obisInfo['code'][0:6]
        ic = int(obisInfo['ic'])
        hexCode = '%02x%02x%02x%02x%02x%02x' % (a, b, c, d, e, f)
        print('------- getting %s ic=%d --------' % (hexCode, ic))

        attr = ATTR_FOR_IC.get(ic)
        if attr is None:
            # BUG FIX: the original fell through and re-sent the previous
            # iteration's request (NameError on the very first iteration).
            print('oops...')
            continue

        def _query(attribute):
            # Send one GetRequest and return the unparsed XML response,
            # or None when the meter did not answer.
            rq = DLMSPlayground.CreateGetRequest(ic, hexCode, attribute)
            print(rq)
            DLMSPlayground.SendHDLCToMeter(p, rq)
            time.sleep(0.5)
            rsp = DLMSPlayground.GetResponseFromMeter(p)
            print(rsp)
            print(DLMS.HDLCToDict(rsp))
            if rsp is not None:
                return xmltodict.unparse(DLMS.HDLCToDict(rsp))
            return None

        # BUG FIX: both attributes default to '' so a missing response no
        # longer raises NameError when building the combined XML.
        xmlAttr3 = _query(attr) or ''
        xmlAttr2 = _query(2) or ''

        combinedXML = ('<Object><OBIS value="%s" /><ClassID Value="%d" />'
                       '<Attribute2Read>%s</Attribute2Read>\n'
                       '<Attribute3Read>%s</Attribute3Read></Object>'
                       % (hexCode, ic, xmlAttr2, xmlAttr3))
        # Strip any embedded <?xml ...?> declarations.
        combinedXML = re.sub(r'<\?.*\?>', '', combinedXML)
        # BUG FIX: close the output file (the original leaked the handle).
        with open('Objects/%s.xml' % hexCode, 'w+') as out:
            out.write(combinedXML)
def write_config_xml(xmlfile, dict): try: with open(xmlfile, "wt") as fo: xmltodict.unparse(dict, fo, pretty=True) except IOError as e: print "Error writing XML file: ", e return False return True
def check_notify(self, xml_notify, handle):
    """Validate a WeChat pay notification and return the ACK/NACK XML.

    *handle* is called with the parsed notification content and must
    return ``(success, fail_msg)``.
    """
    self._logResp(xml_notify, 'Wechat pay notify')
    content = self._check_error(xml_notify)
    ok, fail_msg = handle(**content)
    if not ok:
        self.log.error('Wechat pay notify fail: {}'.format(fail_msg))
        return unparse(dict(xml={'return_code': 'FAIL',
                                 'return_msg': fail_msg}))
    return unparse(dict(xml={'return_code': 'SUCCESS'}))
def test_encoding(self):
    """Documents unparsed with different encodings parse back to the same dict."""
    try:
        sample = unichr(39321)   # Python 2
    except NameError:
        sample = chr(39321)      # Python 3
    obj = {'a': sample}
    doc_utf8 = unparse(obj, encoding='utf-8')
    doc_latin1 = unparse(obj, encoding='iso-8859-1')
    self.assertEqual(parse(doc_utf8), parse(doc_latin1))
    self.assertEqual(parse(doc_utf8), obj)
def test_multiple_roots(self):
    """unparse must reject full documents with more than one root element."""
    # Idiom fix: assertRaises context manager instead of try/fail/except.
    # Two top-level keys -> two roots.
    with self.assertRaises(ValueError):
        unparse({'a': '1', 'b': '2'})
    # A top-level list -> repeated roots.
    with self.assertRaises(ValueError):
        unparse({'a': ['1', '2', '3']})
def _api_out_as(self, out):
    """ Formats the response to the desired output """
    # Commands with special (non json/xml) handling short-circuit here.
    if self._api_cmd == "docs_md":
        return out["response"]["data"]
    elif self._api_cmd == "download_log":
        return
    elif self._api_cmd == "pms_image_proxy":
        cherrypy.response.headers["Content-Type"] = "image/jpeg"
        return out["response"]["data"]

    if self._api_out_type == "json":
        cherrypy.response.headers["Content-Type"] = "application/json;charset=UTF-8"
        try:
            if self._api_debug:
                out = json.dumps(out, indent=4, sort_keys=True)
            else:
                out = json.dumps(out)
            if self._api_callback is not None:
                cherrypy.response.headers["Content-Type"] = "application/javascript"
                # wrap with JSONP call if requested
                out = self._api_callback + "(" + out + ");"
        # if we fail to generate the output fake an error
        except Exception as e:
            logger.info(u"PlexPy APIv2 :: " + traceback.format_exc())
            out["message"] = traceback.format_exc()
            out["result"] = "error"
    elif self._api_out_type == "xml":
        cherrypy.response.headers["Content-Type"] = "application/xml"
        try:
            out = xmltodict.unparse(out, pretty=True)
        except Exception as e:
            logger.error(u"PlexPy APIv2 :: Failed to parse xml result")
            try:
                # BUG FIX: store the message text, not the Exception object;
                # unparse cannot serialize an Exception instance, so the
                # original always fell through to the fallback template.
                out["message"] = str(e)
                out["result"] = "error"
                out = xmltodict.unparse(out, pretty=True)
            except Exception as e:
                logger.error(u"PlexPy APIv2 :: Failed to parse xml result error message %s" % e)
                out = """<?xml version="1.0" encoding="utf-8"?>
<response>
    <message>%s</message>
    <data></data>
    <result>error</result>
</response>
""" % e
    return out
def convert(self, path, data):
    """Write *data* to *path* as pretty-printed XML and return *path*.

    Lists are wrapped under an ``items`` root keyed by element index.
    On failure the partially written file is removed and the exception
    re-raised.
    """
    if isinstance(data, list):
        data = {'items': {str(index): item for index, item in enumerate(data)}}
    try:
        with open(path, 'w') as outfile:
            xmltodict.unparse(data, outfile, pretty=True)
    except Exception:
        logger.exception(u'File `{}` can not be parsed in xml'.format(path))
        # Don't leave a truncated/partial file behind.
        os.remove(path)
        raise
    return path
def _api_out_as(self, out):
    """ Formats the response to the desired output """
    # Commands with special (non json/xml) handling short-circuit here.
    if self._api_cmd == 'docs_md':
        return out['response']['data']
    elif self._api_cmd == 'download_log':
        return
    elif self._api_cmd == 'pms_image_proxy':
        cherrypy.response.headers['Content-Type'] = 'image/jpeg'
        return out['response']['data']

    if self._api_out_type == 'json':
        cherrypy.response.headers['Content-Type'] = 'application/json;charset=UTF-8'
        try:
            if self._api_debug:
                out = json.dumps(out, indent=4, sort_keys=True)
            else:
                out = json.dumps(out)
            if self._api_callback is not None:
                cherrypy.response.headers['Content-Type'] = 'application/javascript'
                # wrap with JSONP call if requested
                out = self._api_callback + '(' + out + ');'
        # if we fail to generate the output fake an error
        except Exception as e:
            logger.info(u'PlexPy APIv2 :: ' + traceback.format_exc())
            out['message'] = traceback.format_exc()
            out['result'] = 'error'
    elif self._api_out_type == 'xml':
        cherrypy.response.headers['Content-Type'] = 'application/xml'
        try:
            out = xmltodict.unparse(out, pretty=True)
        except Exception as e:
            logger.error(u'PlexPy APIv2 :: Failed to parse xml result')
            try:
                # BUG FIX: store the message text, not the Exception object;
                # unparse cannot serialize an Exception instance, so the
                # original always fell through to the fallback template.
                out['message'] = str(e)
                out['result'] = 'error'
                out = xmltodict.unparse(out, pretty=True)
            except Exception as e:
                logger.error(u'PlexPy APIv2 :: Failed to parse xml result error message %s' % e)
                out = '''<?xml version="1.0" encoding="utf-8"?>
<response>
    <message>%s</message>
    <data></data>
    <result>error</result>
</response>
''' % e
    return out
def __call__(self, text):
    """Dispatch a raw SOAP request *text* to the wrapped app and return the
    serialized SOAP reply (or a fault document on error)."""
    soap_version = None
    try:
        data = xmltodict.parse(text, process_namespaces=True, namespaces=self.parse_ns)
        # Detect the SOAP version from the envelope's namespace prefix
        # (self.parse_ns maps the real namespaces to s11:/s12: prefixes).
        if "s11:Envelope" in data:
            soap_version = SOAP11
            body = data["s11:Envelope"]["s11:Body"]
        elif "s12:Envelope" in data:
            soap_version = SOAP12
            body = data["s12:Envelope"]["s12:Body"]
        else:
            # No recognizable envelope: flag the context (if any) and bail.
            if self.ctx:
                self.ctx.exc_info = None
                self.ctx.error = True
            return "Missing SOAP Envelope"
        # Hand the body to the application; its result becomes the reply body.
        res = self.app(body)
        out = OrderedDict([
            ("env:Envelope", OrderedDict([
                ("@xmlns:env", soap_version),
                ("env:Body", res),
            ])),
        ])
        # Add namespace attributes, preferably to the inner top-level element
        # but fallback to putting them on the Envelope
        root = out["env:Envelope"]
        try:
            keys = res.keys()
            if len(keys) == 1:
                # Python 2: keys() is a list, so indexing works here.
                root = res[keys[0]]
        except AttributeError:
            # res is not a mapping -- keep the Envelope as attachment point.
            pass
        for (k, v) in self.namespaces.iteritems():
            root["@xmlns:"+v] = k
        # Add canned attributes, typically for adding encodingStyle
        # (Note: SOAP 1.1 allows this to be anywhere including on the
        # envelope, but SOAP 1.2 is more restrictive)
        if self.reply_attrs:
            root.update(self.reply_attrs)
        return xmltodict.unparse(out, **self.unparse_options)
    except Exception:
        if not self.trap_exception:
            raise
        exc_info = sys.exc_info()
        if self.ctx:
            # This allows the exception to be logged elsewhere,
            # and for a HTTP connector to return a 500 status code
            self.ctx.exc_info = exc_info
            self.ctx.error = True
        # Serialize a SOAP fault for the trapped exception.
        return xmltodict.unparse(self.fault(exc_info, soap_version), **self.unparse_options)
def test_nested(self):
    """Nested structures (including attributes and #text) round-trip exactly."""
    for obj in ({"a": {"b": "1", "c": "2"}},
                {"a": {"b": {"c": {"@a": "x", "#text": "y"}}}}):
        self.assertEqual(obj, parse(unparse(obj)))
        self.assertEqual(unparse(obj), unparse(parse(unparse(obj))))
def test_nested(self):
    """Nested structures (including attributes and #text) round-trip exactly."""
    for obj in ({'a': {'b': '1', 'c': '2'}},
                {'a': {'b': {'c': {'@a': 'x', '#text': 'y'}}}}):
        self.assertEqual(obj, parse(unparse(obj)))
        self.assertEqual(unparse(obj), unparse(parse(unparse(obj))))
def inputsToBaclava(self):
    """ Returns an XML in baclava format corresponding to previously specified input ports and values. """
    # Nothing to serialize without input ports.
    if len(self.inputsDic)==0:
        return None;
    try:
        # Empty <b:dataThingMap/> document used as the outer wrapper.
        enclosingDicMap = {}
        enclosingDicMap['@xmlns:b']='http://org.embl.ebi.escience/baclava/0.1alpha'
        baclavaDic = { 'b:dataThingMap': enclosingDicMap}
        baseDoc = xmltodict.unparse(baclavaDic, pretty=True)
        # Accumulates one <b:dataThing> fragment per port.
        fullDataThingStringList = ""
        for port in self.inputsDic.keys():
            if self.inputsDic[port]!=None:
                # Every value is declared as text/plain.
                mimeTypesDict = { 's:mimeTypes' : {'s:mimeType' : 'text/plain'}}
                mimeTypesDict['@xmlns:s'] = 'http://org.embl.ebi.escience/xscufl/0.1alpha'
                metadataDict = { 's:metadata' : mimeTypesDict}
                metadataDict[ '@lsid'] =''
                metadataDict[ '@syntactictype']="'text/plain'"
                if len(self.inputsDic[port])==1:
                    # Single value: one base64-encoded data element.
                    dataElementDataDict = { 'b:dataElementData': base64.b64encode(self.inputsDic[port][0])}
                    dataElementDataDict [ '@lsid'] =''
                    metadataDict[ 'b:dataElement'] = dataElementDataDict
                else:
                    # Multiple values: a partial order chaining item i-1 -> i,
                    # plus one indexed, base64-encoded element per value.
                    relationEmptyDict = [{ '@parent': "0", '@child': "1" }]
                    for i in range(2,len(self.inputsDic[port])):
                        relationEmptyDict.append({ '@parent': str(i-1), '@child': str(i) })
                    relationDict = { 'b:relation' : relationEmptyDict }
                    relationListDict = { 'b:relationList': relationDict , '@lsid': "" , '@type': "list"}
                    dataElementDataDict = []
                    for i in range(len(self.inputsDic[port])):
                        dataElementDataDict.append( { 'b:dataElementData': base64.b64encode(self.inputsDic[port][i]), '@lsid': "", '@index': str(i)} )
                    dataElementDict = { 'b:dataElement': dataElementDataDict}
                    relationListDict['b:itemList'] = dataElementDict
                    metadataDict[ 'b:partialOrder'] = relationListDict
                myGridDataDocumentDict = { 'b:myGridDataDocument': metadataDict, '@key': port}
                dataThingDic = {'b:dataThing': myGridDataDocumentDict}
                dataThingDicString = xmltodict.unparse(dataThingDic, pretty=True)
                # Drop the first line (<?xml ...?> declaration) of the fragment.
                dataThingDicString = dataThingDicString[ dataThingDicString.find('\n') + 1 : ]
                fullDataThingStringList = fullDataThingStringList + dataThingDicString
        if fullDataThingStringList!="":
            # Splice the accumulated fragments inside the wrapper by string
            # surgery: drop the closing tag, append fragments, re-close.
            baseDoc = baseDoc.replace("</b:dataThingMap>" , "\n" + fullDataThingStringList + "\n")
            baseDoc = baseDoc + "</b:dataThingMap>"
        return baseDoc
    except Exception as e:
        # NOTE(review): e.message is Python-2-only -- confirm before porting.
        raise Exception('Error while generating Baclava string: ' + e.message)
def open_shell(self, i_stream='stdin', o_stream='stdout stderr',
               working_directory=None, env_vars=None, noprofile=False,
               codepage=437, lifetime=None, idle_timeout=None):
    """
    Create a Shell on the destination host
    @param string i_stream: Which input stream to open. Leave this alone
        unless you know what you're doing (default: stdin)
    @param string o_stream: Which output stream to open. Leave this alone
        unless you know what you're doing (default: stdout stderr)
    @param string working_directory: the directory to create the shell in
    @param dict env_vars: environment variables to set for the shell. For
        instance: {'PATH': '%PATH%;c:/Program Files (x86)/Git/bin/',
        'CYGWIN': 'nontsec codepage:utf8'}
    @returns The ShellId from the SOAP response. This is our open shell
        instance on the remote machine.
    @rtype string
    """
    req = {'env:Envelope': self._get_soap_header(
        resource_uri='http://schemas.microsoft.com/wbem/wsman/1/windows/shell/cmd',  # NOQA
        action='http://schemas.xmlsoap.org/ws/2004/09/transfer/Create')}
    header = req['env:Envelope']['env:Header']
    header['w:OptionSet'] = {
        'w:Option': [
            {
                '@Name': 'WINRS_NOPROFILE',
                '#text': str(noprofile).upper()  # TODO remove str call
            },
            {
                '@Name': 'WINRS_CODEPAGE',
                '#text': str(codepage)  # TODO remove str call
            }
        ]
    }
    shell = req['env:Envelope'].setdefault(
        'env:Body', {}).setdefault('rsp:Shell', {})
    shell['rsp:InputStreams'] = i_stream
    shell['rsp:OutputStreams'] = o_stream
    if working_directory:
        # TODO ensure that rsp:WorkingDirectory should be nested within rsp:Shell  # NOQA
        shell['rsp:WorkingDirectory'] = working_directory
    # TODO check Lifetime param: http://msdn.microsoft.com/en-us/library/cc251546(v=PROT.13).aspx  # NOQA
    #if lifetime:
    #    shell['rsp:Lifetime'] = iso8601_duration.sec_to_dur(lifetime)
    # TODO make it so the input is given in milliseconds and converted to xs:duration  # NOQA
    if idle_timeout:
        shell['rsp:IdleTimeOut'] = idle_timeout
    if env_vars:
        env = shell.setdefault('rsp:Environment', {})
        # BUG FIX: the original overwrote env['rsp:Variable'] on every
        # iteration, so only the LAST environment variable was ever sent;
        # emit one <rsp:Variable> element per entry instead.
        env['rsp:Variable'] = [
            {'@Name': key, '#text': value}
            for key, value in env_vars.items()
        ]
    res = self.send_message(xmltodict.unparse(req))
    #res = xmltodict.parse(res)
    #return res['s:Envelope']['s:Body']['x:ResourceCreated']['a:ReferenceParameters']['w:SelectorSet']['w:Selector']['#text']
    root = ET.fromstring(res)
    return next(
        node for node in root.findall('.//*')
        if node.get('Name') == 'ShellId').text
def _to_xml(self):
    """Serialize object to xml for upload

    Returns
    -------
    xml_dataset : str
        XML description of the data.
    """
    props = ('id', 'name', 'version', 'description', 'format', 'creator',
             'contributor', 'collection_date', 'upload_date', 'language',
             'licence', 'url', 'default_target_attribute',
             'row_id_attribute', 'ignore_attribute', 'version_label',
             'citation', 'tag', 'visibility', 'original_data_url',
             'paper_url', 'update_comment', 'md5_checksum')  # , 'data_file'

    data_dict = OrderedDict([('@xmlns:oml', 'http://openml.org/openml')])
    # Only attributes that are actually set get serialized.
    for prop in props:
        value = getattr(self, prop, None)
        if value is not None:
            data_dict["oml:" + prop] = value

    data_container = OrderedDict()
    data_container['oml:data_set_description'] = data_dict
    xml_string = xmltodict.unparse(
        input_dict=data_container,
        pretty=True,
    )
    # A flow may not be uploaded with the xml encoding specification:
    # <?xml version="1.0" encoding="utf-8"?>
    return xml_string.split('\n', 1)[-1]
def reportCreation():
    """Flask view: create a Concur expense report from the posted form.

    Requires an OAuth token in the session.  GET renders the creation
    form; POST submits the new report to the Concur v3.0 expense API and
    renders a success page with the created report's ID and URI.
    """
    error = None
    if 'token' in session:
        concur = ConcurClient()
        token = session['token']
        username = session['username']
        if request.method == 'POST':
            today = datetime.date.today()  # NOTE(review): unused -- confirm before removing
            # Build the minimal report payload from the submitted form.
            new_xml_update = {}
            new_xml_update['Report'] = {}
            #new_xml_update['Report']['ID'] = dict_resp_report['Report']['ID']
            new_xml_update['Report']['Name'] = request.form['Name']
            new_xml_update['Report']['Comment'] = request.form['Comment']
            xml_post = xmltodict.unparse(new_xml_update)
            # POST the XML payload; validate_response returns the parsed body.
            content_type, resp_post = concur.validate_response(concur.api(
                'v3.0/expense/reports', method='POST',
                params={'access_token': token},
                headers={'content-type': 'application/xml', 'accept': '*'},
                data=xml_post))
            #print resp_post['Response']['ID']
            #print resp_post['Response']['URI']
            return render_template('reportCreationSuccess.html',
                                   username=username,
                                   reportURL=resp_post['Response']['URI'],
                                   reportID=resp_post['Response']['ID'],
                                   error=error)
        return render_template('reportCreation.html', username=username, error=error)
    else:
        return 'Invalid Token - Please Login /login'
def api_publish(self, end_point=None):
    """Handle a service-publish request (JSON or XML body).

    Creates or refreshes the service entry in the discovery database and
    returns a cookie ("<sig>:<service_type>") identifying the publisher,
    serialized in the same content type as the request.
    """
    self._debug['msg_pubs'] += 1
    ctype = bottle.request.headers['content-type']
    json_req = {}
    if ctype == 'application/json':
        data = bottle.request.json
        for service_type, info in data.items():
            json_req['name'] = service_type
            json_req['info'] = info
    elif ctype == 'application/xml':
        data = xmltodict.parse(bottle.request.body.read())
        for service_type, info in data.items():
            json_req['name'] = service_type
            json_req['info'] = dict(info)
    else:
        # BUG FIX: the original called bottle.abort(400, e) with an
        # undefined name `e` (NameError); report the content type instead.
        bottle.abort(400, 'Unsupported content-type %s' % ctype)
    sig = end_point or publisher_id(
        bottle.request.environ['REMOTE_ADDR'], json.dumps(json_req))
    # Rx {'name': u'ifmap-server', 'info': {u'ip_addr': u'10.84.7.1',
    # u'port': u'8443'}}
    info = json_req['info']
    service_type = json_req['name']
    entry = self._db_conn.lookup_service(service_type, service_id=sig)
    if not entry:
        # First publish from this endpoint: create a fresh entry.
        entry = {
            'service_type': service_type,
            'service_id': sig,
            'in_use': 0,
            'ts_use': 0,
            'ts_created': int(time.time()),
            'prov_state': 'new',
            'remote': bottle.request.environ.get('REMOTE_ADDR'),
            'sequence': str(int(time.time())) + socket.gethostname(),
        }
    elif 'sequence' not in entry or self.service_expired(entry):
        # handle upgrade or republish after expiry
        entry['sequence'] = str(int(time.time())) + socket.gethostname()
    entry['info'] = info
    entry['admin_state'] = 'up'
    entry['heartbeat'] = int(time.time())
    # insert entry if new or timed out
    self._db_conn.update_service(service_type, sig, entry)
    response = {'cookie': sig + ':' + service_type}
    if ctype != 'application/json':
        response = xmltodict.unparse({'response': response})
    self.syslog('publish service "%s", sid=%s, info=%s'
                % (service_type, sig, info))
    if service_type.lower() not in self.service_config:
        self.service_config[service_type.lower()] = self._args.default_service_opts
    return response
def create_monitor(self, topic, url, auth_user, auth_pass,
                   description=None, batch_size=None, batch_duration=None):
    """ Create a new Device Cloud http monitor for the specified topic """
    monitor = {
        'monTopic': topic,
        'monTransportType': 'http',
        'monTransportUrl': url,
        'monTransportToken': ':'.join([auth_user, auth_pass]),
        'monFormatType': 'json',
    }
    # Optional settings are only included when supplied.
    if description:
        monitor['monDescription'] = description
    if batch_size:
        monitor['monBatchSize'] = batch_size
    if batch_duration:
        monitor['monBatchDuration'] = batch_duration
    post_body = xmltodict.unparse({'Monitor': monitor})
    uri = ws_uri.format(resource=MONITOR_RESOURCE,
                        fqdn=self.cloud_fqdn,
                        path_filter="")
    return _parse_response(self._post(uri, data=post_body))
def _request(self, method, path, data=None, skip_unparse=False):
    """ Internal method for making requests to the FastSpring server.

    Returns (content, status, message, reason) from the HTTP response.
    """
    # Serialize the payload unless the caller already provides raw XML.
    if data and not skip_unparse:
        body = xmltodict.unparse(data)
    else:
        body = data
    authstring = 'user={}&pass={}'.format(self.username, self.password)
    # Normalize the path to "<segments>/" with no leading slash.
    if path.startswith('/'):
        path = path[1:]
    if not path.endswith('/'):
        path += '/'
    request_path = '/company/{}/{}?{}'.format(self.company, path, authstring)
    if self.debug:
        print('-' * 80)
        print('{} {}{}'.format(method, self.api_domain, request_path))
        print(body)
        print('-' * 80)
    conn = http.client.HTTPSConnection(self.api_domain)
    try:
        headers = {"Content-type": "application/xml"}
        conn.request(method, request_path, body, headers)
        resp = conn.getresponse()
        status = resp.status
        message = resp.msg
        reason = resp.reason
        content = resp.read()
    finally:
        # BUG FIX: the original never closed the connection (leak).
        conn.close()
    return content, status, message, reason
def cleanup_command(self, shell_id, command_id):
    """
    Clean-up after a command. @see #run_command
    @param string shell_id: The shell id on the remote machine.
        See #open_shell
    @param string command_id: The command id on the remote machine.
        See #run_command
    @returns: This should have more error checking but it just returns
        true for now.
    @rtype bool
    """
    message_id = uuid.uuid4()
    envelope = {'env:Envelope': self._get_soap_header(
        resource_uri='http://schemas.microsoft.com/wbem/wsman/1/windows/shell/cmd',  # NOQA
        action='http://schemas.microsoft.com/wbem/wsman/1/windows/shell/Signal',  # NOQA
        shell_id=shell_id,
        message_id=message_id)}

    # Signal the Command references to terminate (close stdout/stderr)
    signal_body = envelope['env:Envelope'].setdefault(
        'env:Body', {}).setdefault('rsp:Signal', {})
    signal_body['@CommandId'] = command_id
    signal_body['rsp:Code'] = 'http://schemas.microsoft.com/wbem/wsman/1/windows/shell/signal/terminate'  # NOQA

    reply = self.send_message(xmltodict.unparse(envelope))
    reply_root = ET.fromstring(reply)
    relates_to = next(node.text for node in reply_root.findall('.//*')
                      if node.tag.endswith('RelatesTo'))
    # TODO change assert into user-friendly exception
    assert uuid.UUID(relates_to.replace('uuid:', '')) == message_id
def setUp(self):
    """ Create a check """
    core = MimicCore(Clock(), [])
    self.root = MimicRoot(core).app.resource()
    # Placeholder attribute values -- the test only exercises the
    # request/response plumbing.
    attributes = {
        "name": "name",
        "module": "module",
        "target": "target",
        "period": "period",
        "timeout": "timeout",
        "filterset": "filterset",
    }
    self.create_check = {"check": {"attributes": attributes}}
    self.create_check_xml_payload = xmltodict.unparse(
        self.create_check).encode("utf-8")
    self.check_id = uuid.uuid4()
    url = "noit/checks/set/{0}".format(self.check_id)
    (self.response, response_body) = self.successResultOf(
        request_with_content(self, self.root, "PUT", url,
                             body=self.create_check_xml_payload))
    self.create_json_response = xmltodict.parse(response_body)
def replace_spreadsheet_row_values(cellsfeed_url, row_number, column_values,
                                   use_raw_values=False):
    """Overwrite one spreadsheet row, one cell PUT per column value.

    Unless use_raw_values is set, values are prefixed with an apostrophe
    so the spreadsheet treats them as literal text.
    """
    from collections import OrderedDict
    import xmltodict
    for col_index, col_value in enumerate(column_values, start=1):
        cell_id = 'R' + str(row_number) + 'C' + str(col_index)
        cell_url = cellsfeed_url + '/' + cell_id
        value = str(col_value)
        if not use_raw_values:
            value = "'" + value
        # Atom entry describing the single-cell update.
        entry = OrderedDict([
            ('@xmlns', 'http://www.w3.org/2005/Atom'),
            ('@xmlns:gs', "http://schemas.google.com/spreadsheets/2006"),
            ('id', cell_url),
            ('link', OrderedDict([
                ('@rel', 'edit'),
                ('@type', 'application/atom+xml'),
                ('@href', cell_url),
            ])),
            ('gs:cell', OrderedDict([
                ('@row', str(row_number)),
                ('@col', str(col_index)),
                ('@inputValue', unicode(value)),  # Python 2 codebase
            ])),
        ])
        entry_xml = xmltodict.unparse(
            OrderedDict([('entry', entry)])).encode('utf-8')
        make_authorized_request(cell_url, None, 'PUT', entry_xml,
                                {'If-None-Match': 'replace'})
def generate_flow_xml(classifier):
    """Build the OpenML flow-description XML for a scikit-learn classifier."""
    import sklearn
    flow = OrderedDict()
    flow['@xmlns:oml'] = 'http://openml.org/openml'
    flow['oml:name'] = classifier.__module__ + "." + classifier.__class__.__name__
    flow['oml:external_version'] = 'Tsklearn_' + sklearn.__version__
    flow['oml:description'] = 'Flow generated by openml_run'
    # One <oml:parameter> per hyper-parameter name.
    # (data_type/default_value are omitted: the sklearn type names do not
    # conform to the OpenML standard, e.g. "int" vs "integer")
    flow['oml:parameter'] = [{'oml:name': name}
                             for name in classifier.get_params()]
    flow_xml = xmltodict.unparse(OrderedDict([('oml:flow', flow)]),
                                 pretty=True)
    # A flow may not be uploaded with the encoding specification --
    # strip the leading <?xml ...?> line.
    return flow_xml.split('\n', 1)[-1]
def update_URDF_from_config(urdf_path, config_path):
    """Rewrite the URDF in place: joint limits come from the motor config,
    link color and mass from module-level constants."""
    with open(urdf_path) as f:
        urdf = xmltodict.parse(f.read())
    with open(config_path) as f:
        conf = json.load(f)
    confmotors = conf['motors']

    # Update joint properties
    for joint in urdf['robot']['joint']:
        motor = confmotors[joint['@name']]
        dxl_type = motor['type']
        lower_deg, upper_deg = motor['angle_limit']
        # added by sebastien because no 'limit' key on head_z at least
        joint['limit'] = collections.OrderedDict()
        joint['limit']['@lower'] = str(deg2rad(lower_deg))
        joint['limit']['@upper'] = str(deg2rad(upper_deg))
        joint['limit']['@effort'] = str(DXL2EFFORT[dxl_type])
        joint['limit']['@velocity'] = str(DXL2VEL[dxl_type])

    # Update link properties
    for link in urdf['robot']['link']:
        link['visual']['material']['color']['@rgba'] = COLOR
        link['mass'] = MASS[link['@name']]

    new_urdf = xmltodict.unparse(urdf, pretty=True)
    with open(urdf_path, 'w') as f:
        f.write(new_urdf)
def update_URDF_from_config(urdf_path, config_path):
    """V-REP variant: update joint limits, flip the rotation axis of motors
    not listed in the module-level ``good_motors``, and point link meshes at
    the ``_visual``/``_respondable`` STL variants.

    Writes ``<urdf stem>_vrep.URDF`` next to *urdf_path* and returns the
    list of motor names whose axis was flipped.
    """
    with open(urdf_path) as f:
        urdf = xmltodict.parse(f.read())
    with open(config_path) as f:
        conf = json.load(f)
    confmotors = conf['motors']
    joints = urdf['robot']['joint']
    links = urdf['robot']['link']

    # Update joint properties
    wrong_motors = []
    for j in joints:
        name = j['@name']
        dxl = confmotors[name]['type']
        ll, ul = confmotors[name]['angle_limit']
        j['limit']['@lower'] = str(deg2rad(ll))
        j['limit']['@upper'] = str(deg2rad(ul))
        j['limit']['@effort'] = str(DXL2EFFORT[dxl])
        j['limit']['@velocity'] = str(DXL2VEL[dxl])
        # Update motors rotation: negate the axis sign for motors not in
        # good_motors.
        if name not in good_motors:
            # FIX: renamed the local that was called `list`, which
            # shadowed the builtin.
            axis_values = j['axis']['@xyz'].split()
            flipped = ['1' if v == '-1' else '-1' if v == '1' else v
                       for v in axis_values]
            j['axis']['@xyz'] = ' '.join(flipped)
            wrong_motors.append(name)

    # Update link properties
    for l in links:
        name = l['@name']
        mesh = l['visual']['geometry']['mesh']['@filename']
        if '_visual' not in mesh:
            pos = mesh.find('.')
            l['visual']['geometry']['mesh']['@filename'] = (
                mesh[0:10] + mesh[24:pos] + '_visual.STL')
        mesh = l['collision']['geometry']['mesh']['@filename']
        if '_respondable' not in mesh:
            pos = mesh.find('.')
            l['collision']['geometry']['mesh']['@filename'] = (
                mesh[0:10] + mesh[24:pos] + '_respondable.STL')
        l['visual']['material']['color']['@rgba'] = COLOR
        #l['mass'] = MASS[name]

    new_urdf = xmltodict.unparse(urdf, pretty=True)
    pos = urdf_path.find('.U')
    urdf_path_new = urdf_path[0:pos] + '_vrep.URDF'
    with open(urdf_path_new, 'w') as f:
        f.write(new_urdf)
    return wrong_motors
def close_shell(self, shell_id):
    """
    Close the shell
    @param string shell_id: The shell id on the remote machine.
        See #open_shell
    @returns This should have more error checking but it just returns
        true for now.
    @rtype bool
    """
    message_id = uuid.uuid4()
    envelope = {'env:Envelope': self._get_soap_header(
        resource_uri='http://schemas.microsoft.com/wbem/wsman/1/windows/shell/cmd',  # NOQA
        action='http://schemas.xmlsoap.org/ws/2004/09/transfer/Delete',
        shell_id=shell_id,
        message_id=message_id)}

    # SOAP message requires empty env:Body
    envelope['env:Envelope'].setdefault('env:Body', {})

    reply = self.send_message(xmltodict.unparse(envelope))
    reply_root = ET.fromstring(reply)
    relates_to = next(node.text for node in reply_root.findall('.//*')
                      if node.tag.endswith('RelatesTo'))
    # TODO change assert into user-friendly exception
    assert uuid.UUID(relates_to.replace('uuid:', '')) == message_id
def dict_to_xml(xmlpath, dic):
    """Serialize *dic* to pretty-printed XML at *xmlpath*."""
    with open(xmlpath, 'w') as handle:
        handle.write(xmltodict.unparse(dic, pretty=True))
def __post_request(self, dict_body: dict, api_url: str):
    """Create a HTTP post request.

    Parameters:
        dict_body (dict): HTTP POST body data for the wanted API,
            serialized to XML before sending.
        api_url (str): Url for the wanted API.

    Returns:
        A response from the request (dict): the 'operation' section of a
        successful Sage Intacct response.

    Raises:
        WrongParamsError, InvalidTokenError, NoPrivilegeError,
        NotFoundItemError, ExpiredTokenError, InternalServerError,
        SageIntacctSDKError depending on control status, operation result
        and HTTP status code.
    """
    api_headers = {
        'content-type': 'application/xml'
    }
    body = xmltodict.unparse(dict_body)
    response = requests.post(api_url, headers=api_headers, data=body)
    # force_list keeps the dimension element a list even for one item.
    parsed_xml = xmltodict.parse(response.text, force_list={self.__dimension})
    # Round-trip through JSON to turn OrderedDicts into plain dicts.
    parsed_response = json.loads(json.dumps(parsed_xml))
    if response.status_code == 200:
        # Application-level status lives under response/control even when
        # HTTP says 200.
        if parsed_response['response']['control']['status'] == 'success':
            api_response = parsed_response['response']['operation']
        if parsed_response['response']['control']['status'] == 'failure':
            exception_msg = self.__decode_support_id(parsed_response['response']['errormessage'])
            raise WrongParamsError('Some of the parameters are wrong', exception_msg)
        if api_response['authentication']['status'] == 'failure':
            raise InvalidTokenError('Invalid token / Incorrect credentials', api_response['errormessage'])
        if api_response['result']['status'] == 'success':
            return api_response
        if api_response['result']['status'] == 'failure':
            exception_msg = self.__decode_support_id(api_response['result']['errormessage'])
            # Some permission failures are only distinguishable by message text.
            for error in exception_msg['error']:
                if error['description2'] and 'You do not have permission for API' in error['description2']:
                    raise InvalidTokenError('The user has insufficient privilege', exception_msg)
            raise WrongParamsError('Error during {0}'.format(api_response['result']['function']), exception_msg)
    # Map non-200 HTTP statuses to the SDK's exception hierarchy.
    if response.status_code == 400:
        raise WrongParamsError('Some of the parameters are wrong', parsed_response)
    if response.status_code == 401:
        raise InvalidTokenError('Invalid token / Incorrect credentials', parsed_response)
    if response.status_code == 403:
        raise NoPrivilegeError('Forbidden, the user has insufficient privilege', parsed_response)
    if response.status_code == 404:
        raise NotFoundItemError('Not found item with ID', parsed_response)
    if response.status_code == 498:
        raise ExpiredTokenError('Expired token, try to refresh it', parsed_response)
    if response.status_code == 500:
        raise InternalServerError('Internal server error', parsed_response)
    # Anything else is unexpected.
    raise SageIntacctSDKError('Error: {0}'.format(parsed_response))
def create_image_parts_single_image(image_path, bb_dict):
    """
    Using normalised bounding box dictionary, generate boxes and divide
    image into segments, saving them into the image's named folder.

    bb_dict maps a field name to (y_min, x_min, y_max, x_max) in
    normalised [0, 1] coordinates.  Writes cropped JPEGs under
    imageparts/<image stem>/ and a Pascal-VOC-style XML under testxmls/.
    """
    im = Image.open(image_path)
    im_width, im_height = im.size
    #Reading template xml to generate xml file
    # NOTE(review): the template file handle is never closed -- confirm
    # whether this should use a `with` block.
    data = xmltodict.parse((open('template.xml')))
    #Writing xml contents
    data['annotation']['filename'] = image_path.split('/')[-1]
    data['annotation']['path'] = ''
    data['annotation']['size']['width'] = im_width
    data['annotation']['size']['height'] = im_height
    for k in bb_dict.keys():
        # Normalised box corners for this field.
        y_min = bb_dict[k][0]
        x_min = bb_dict[k][1]
        y_max = bb_dict[k][2]
        x_max = bb_dict[k][3]
        #xml part: update the matching <object> entry with pixel coords
        for y in data['annotation']['object']:
            name = k
            if k == 'Payto':
                # Template uses 'Pay To' for the 'Payto' key.
                name = 'Pay To'
            if y['name'] == name:
                y['bndbox']['xmin'] = round(x_min * im_width)
                y['bndbox']['xmax'] = round(x_max * im_width)
                y['bndbox']['ymin'] = round(y_min * im_height)
                y['bndbox']['ymax'] = round(y_max * im_height)
        #imagepart: crop and save every field except For/Memo
        if k != 'For/Memo':
            #Cropping
            crop_rectangle = (x_min * im_width, y_min * im_height,
                              x_max * im_width, y_max * im_height)
            cropped_im = im.crop(crop_rectangle)
            image_folder_name = image_path.split('/')[-1].split('.')[0]
            if image_folder_name not in os.listdir('imageparts'):
                os.mkdir('imageparts/{}'.format(image_folder_name))
            #print k
            #print image_folder_name
            #Saving
            cropped_im.save('imageparts/{}/{}.jpg'.format(
                image_folder_name, k))
            print 'imageparts/{}/{}.jpg'.format(image_folder_name, k)
    if 'testxmls' not in os.listdir('.'):
        os.mkdir('testxmls')
    #Saving xml file
    with open(
            "testxmls/{}.xml".format(image_path.split('/')[-1].split('.')[0]),
            "w") as f:
        f.write(xmltodict.unparse(data))
def create_fsblr_pair(self, source_client_id, destination_client_id,
                      source_volumes, destination_volumes, recovery_type,
                      **kwargs):
    """Creates a new FSBLR pair on the commcell with the specified options

        Args:
            source_client_id      (str) : The source client's ID
            destination_client_id (str) : The destination client's ID
            source_volumes        (list): The list of all source volumes
            destination_volumes   (list): The list of all destination volumes
            recovery_type (RecoveryType): The enum to specify what type of recovery pair is supposed to be
            **kwargs (dict)             : Only used for granular type FSBLR pairs
                rpstore_id         (str): The ID of the RPstore to be used
                rpstore_name       (str): The name of the RPStore
                ccrp_interval      (int): The number of minutes after which CCRP is taken
                acrp_interval      (int): The number of minutes after which ACRP is taken
                max_rp_interval    (int): The number of minutes after which RP store's retention is ended
                rp_merge_delay     (int): Merge recovery points older than time in minutes
                rp_retention       (int): The number of minutes for which RPstore is retained for
                rpstore_switch_live(int): The time in minutes after which pair is switch to live if RPstore is offline
                merge_only_off_peak(bool):Whether to merge RPstore only during off-peak time

        Raises:
            SDKException: when the API response is empty or reports an error
    """
    # Recovery options travel as an XML blob embedded in the JSON request.
    blr_options = {
        'BlockReplication_BLRRecoveryOptions': {
            '@recoveryType': recovery_type.value,
            'granularV2': {
                '@ccrpInterval': kwargs.get('ccrp_interval', 300),
                '@acrpInterval': kwargs.get('acrp_interval', 0),
                '@maxRpInterval': kwargs.get('max_rp_interval', 21600),
                '@rpMergeDelay': kwargs.get('rp_merge_delay', 172800),
                '@rpRetention': kwargs.get('rp_retention', 604800),
                '@maxRpStoreOfflineTime': kwargs.get('rpstore_switch_live', 0),
                '@useOffPeakSchedule': int(kwargs.get('merge_only_off_peak', False)),
            }
        },
    }

    # Granular pairs additionally carry the RPStore identity.
    if kwargs.get('rpstore_id') and kwargs.get('rpstore_name'):
        granularv2 = blr_options['BlockReplication_BLRRecoveryOptions'][
            'granularV2']
        granularv2['@rpStoreId'] = int(kwargs.get('rpstore_id', 0))
        granularv2['@rpStoreName'] = kwargs.get('rpstore_name')
        blr_options['BlockReplication_BLRRecoveryOptions'][
            'granularV2'] = granularv2

    # Resolve clients and their mount volume details.
    source_client = self._commcell_object.clients.get(
        int(source_client_id))
    destination_client = self._commcell_object.clients.get(
        int(destination_client_id))
    source_client_volumes = source_client.get_mount_volumes(source_volumes)
    destination_client_volumes = destination_client.get_mount_volumes(
        destination_volumes)

    request_json = {
        "destEndPointType": self.EndPointTypes.FILESYSTEM.value,
        # The XML blob must be a single line inside the JSON payload.
        "blrRecoveryOpts": xmltodict.unparse(blr_options, short_empty_elements=True).replace('\n', ''),
        "srcEndPointType": self.EndPointTypes.FILESYSTEM.value,
        "srcDestVolumeMap": [],
        "destEntity": {
            "client": {
                "clientId": int(destination_client_id),
                "clientName": destination_client.client_name,
                "hasDrivesInPair": True,
                "tabLevel": "level-0",
                "checked": True,
            }
        },
        "sourceEntity": {
            "client": {
                "clientId": int(source_client_id),
                "clientName": source_client.client_name,
                "hasDrivesInPair": True,
                "tabLevel": "level-0",
                "checked": True,
            }
        }
    }

    # Pair up source and destination volumes positionally.
    for source, destination in zip(source_client_volumes,
                                   destination_client_volumes):
        request_json['srcDestVolumeMap'].append({
            "sourceVolumeGUID": source['guid'],
            "sourceVolume": source['accessPathList'][0],
            "destVolumeGUID": destination['guid'],
            "destVolume": destination['accessPathList'][0],
            "sourceVolumeSize": source['size'],
            "disabled": "",
        })

    flag, response = (self._commcell_object._cvpysdk_object.make_request(
        'POST', self._services['CREATE_BLR_PAIR'], request_json))
    if flag:
        if response and response.json():
            # errorCode 0 means success; anything else is surfaced as an SDKException.
            if response.json().get('errorCode', 0) != 0:
                response_string = self._commcell_object._update_response_(
                    response.text)
                raise SDKException('Response', '101', response_string)
        else:
            raise SDKException('Response', '102')
    else:
        raise SDKException('Response', '101')
def json_to_xml(json_str): # xmltodict库的unparse()json转xml # 参数pretty 是格式化xml xml_str = xmltodict.unparse(json_str, pretty=1) return xml_str
def post_order_callback(request, body):
    """Mock 'place order' endpoint: validate shopper and card, price the
    cart, attach post-sale info, store the order, and return it as XML.

    Returns a (status_code, headers, body) tuple in the style of the mock
    HTTP framework in use.
    """
    # New order id is one past the highest existing id (or 1 when empty).
    new_order_id = max(orders.keys() or [0]) + 1
    order_date = datetime.now().strftime('%d-%b-%y')

    order = body['order']
    order['order-id'] = new_order_id

    # Ordering shopper section
    ordering_shopper = order['ordering-shopper']

    # Validate shopper id
    try:
        raw_shopper_id = ordering_shopper['shopper-id']
        shopper_id = int(raw_shopper_id)
        shopper_body = shoppers[shopper_id]
    except (KeyError, ValueError):
        return (403, {}, 'User: %s is not authorized to place an order for '
                'shopper: %s.' % (_client.username, raw_shopper_id))

    try:
        credit_card = ordering_shopper['credit-card']
    except KeyError:
        return (400, {'content-type': 'application/xml'},
                mock_responses['order_failed__no_payment_method'])

    # Validate credit card selection: the supplied card must match one of
    # the shopper's stored cards exactly.
    card_found = False
    for credit_card_info in shopper_body['shopper']['shopper-info'][
            'payment-info']['credit-cards-info']['credit-card-info']:
        print(dict(credit_card_info['credit-card']), dict(credit_card))
        if dict(credit_card_info['credit-card']) == dict(credit_card):
            card_found = True
            break
    if not card_found:
        return (400, {'content-type': 'application/xml'},
                mock_responses['order_failed__wrong_payment_details'])

    # Price the cart from the SKU's charge price (taxes fixed at zero).
    cart = order['cart']
    sku = cart['cart-item']['sku']
    assert sku['sku-id'] == helper.TEST_PRODUCT_SKU_ID
    sku_charge_price = sku['sku-charge-price']
    cart['charged-currency'] = sku_charge_price['currency']
    cart['cart-item']['item-sub-total'] = sku_charge_price['amount']
    cart['tax'] = '0.00'
    cart['tax-rate'] = '0'
    cart['total-cart-cost'] = sku_charge_price['amount']

    # Attach a synthetic invoice / financial transaction for the order.
    order['post-sale-info'] = {
        'invoices': {
            'invoice': {
                'invoice-id': 'invoice_%d' % new_order_id,
                'url': 'https://sandbox.bluesnap.com/jsp/show_invoice.jsp',
                'financial-transactions': {
                    'financial-transaction': {
                        'status': 'Pending',
                        'date-due': order_date,
                        'date-created': order_date,
                        'amount': sku_charge_price['amount'],
                        'currency': sku_charge_price['currency'],
                        'soft-descriptor': 'BLS*%s' % order[
                            'soft-descriptor'],
                        'payment-method': 'Credit Card',
                        'target-balance': 'PLIMUS_ACCOUNT',
                        'credit-card': order['ordering-shopper'][
                            'credit-card'],
                        'paypal-transaction-data': None,
                        'skus': {
                            'sku': {
                                'sku-id': sku['sku-id'],
                            },
                        },
                    }
                }
            }
        }
    }

    # Persist the order in the in-memory store and echo it back as XML.
    orders[new_order_id] = body
    return (200, {'content-type': 'application/xml'}, xmltodict.unparse(body))
def down_youporn_categorie_xml(categorie, url, rss_name):
    """Scrape a category listing page at *url* and write its items as an RSS
    XML file to *rss_name*.

    Returns True after the output file has been written (the file is written
    even if XML serialization failed, in which case it is empty).
    """
    __headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.103 Safari/537.36'
    }
    response = requests.get(url=url, headers=__headers)
    html_text = str(response.content, encoding='utf-8')
    dom = etree.HTML(html_text)
    # Collect the anchor element of each video card on the page.
    Hrefs = dom.xpath('//body/div/div[3]/div/div[4]/div[2]/div/div/a')
    '''
 <item>
 <guid>Thu, 11 Jul 2019 19:00:52 GMT</guid>
 <title>Using My Big Vibrator As a D***o</title>
 <description>&lt;a href="https://www.youporn.com/watch/15251363/using-my-big-vibrator-as-a-d***o/" /&gt;&lt;/a&gt; &lt;img border="1" src="http://192.168.127.254:8080/infytb/img/2.jpg" /&gt; &lt;/a&gt; &lt;br/&gt; Length: 20:55 &lt;br/&gt; Keywords: amateur anal big butt big t**s solo girl milf webcam dildos/toys european masturbation hd</description>
 <pubDate>Thu, 11 Jul 2019 19:00:52 GMT</pubDate>
 <img>http://192.168.127.254:8080/infytb/img/2.jpg</img>
 <link>https://www.youporn.com/watch/15251363/using-my-big-vibrator-as-a-d***o/</link>
 </item>
    '''
    rss = {'rss': {'channel': {'item': []}}}
    # try:
    for item in Hrefs:
        # Per-item skeleton; description is a format template filled below.
        item_data = {
            'title': "",
            'link': "",
            'description': '''<br/>
 <img border="1" src="{}" />
 Length:{}<br/>
 Keywords:{}''',
            'guid': "",
            'pubDate': "Sun, 14 Jul 2019 11:00:56 GMT"
        }
        link = str(item.xpath('./@href')[0])
        item_data['title'] = str(
            item.xpath('div[@class="video-box-title"]/text()')[0]).strip()
        img_url = str(item.xpath('span/img/@data-thumbnail')[0])
        # Relative links are made absolute with the site prefix.
        if 'http' not in link:
            item_data['link'] = youporn_head + link
        else:
            item_data['link'] = link
        item_data['guid'] = item_data['link']
        print("title:{} \n img:{} \n link:{}\n ".format(
            item_data['title'], img_url, item_data['link']))
        # Video length, e.g. <div class="video-duration">12:35</div>
        duration = str(
            item.xpath('../*/div[@class="video-duration"]/text()')
            [0]).strip()
        item_data['description'] = item_data['description'].format(
            img_url, duration, item_data['title'])
        rss['rss']['channel']['item'].append(item_data)
    print(rss)
    xml = ''
    try:
        xml = xmltodict.unparse(rss, encoding='utf-8')
        print(xml)
    # except:
    #     xml = xmltodict.unparse({'request': channel}, encoding='utf-8')
    #     print(xml)
    finally:
        # Write whatever we have (possibly '') so the feed file always exists.
        with codecs.open(rss_name, 'w', 'utf-8') as f:
            f.write(xml)
    return True
def writeXML(self, outpath): print("Writting to: " + outpath) f = open(outpath, 'w') f.write(xmltodict.unparse(self.xml, pretty=True)) f.close() print("Saved !")
#Iterate through all the .json files to convert in xml and encrypt the same for sFiles in lJSONExtFiles: #Replace "/" with the "\" replaced_filename = sFiles.replace("/", "\\") #Get only the file name sOnlyFileName = str(replaced_filename.split("\\", -1)[-1]).replace(".json", "") #Read the .json file with open(sFiles, 'r') as f: jsonString = f.read() #Convert it into .xml file xmlString = xmltodict.unparse(json.loads(jsonString), pretty=True) print("File " + str(sOnlyFileName) + " is converted into .xml format.\n") #Write it in a new file with open( str(sNewDirForEncryptFiles) + "/" + str(sOnlyFileName) + '.xml', 'w') as sNewfile: sNewfile.write(xmlString) sNewfile.close() #Encrypt the .xml file and re-write within the same file xmlEncrypt = open( str(sNewDirForEncryptFiles) + "/" + str(sOnlyFileName) + '.xml', "rb").read() sEncodedStructure = base64.b64encode(xmlEncrypt).decode('utf-8')
def wechat():
    """Verify the WeChat server signature (GET) and handle forwarded user
    messages (POST), replying with text or image XML."""
    # When the developer submits the configuration, the WeChat server sends
    # a GET request carrying four parameters:
    #   signature: WeChat signature combining the configured token with the
    #              timestamp and nonce parameters
    #   timestamp: timestamp
    #   nonce:     random number
    #   echostr:   random string
    signature = request.args.get("signature")
    timestamp = request.args.get("timestamp")
    nonce = request.args.get("nonce")

    # Validation flow:
    #   1. sort token / timestamp / nonce lexicographically
    #   2. concatenate the three strings and sha1-hash the result
    #   3. compare the digest with the signature parameter; a match proves
    #      the request came from WeChat
    if not all([signature, timestamp, nonce]):
        # Missing parameters -> bad request
        abort(400)

    li = [WECHAT_TOKEN, timestamp, nonce]
    # Sort
    li.sort()
    # Concatenate and hash
    tmp_str = "".join(li)
    tmp_str = tmp_str.encode('utf-8')
    sign = hashlib.sha1(tmp_str).hexdigest()

    # Compare our signature with the request's; mismatch means the request
    # did not come from WeChat.
    if signature != sign:
        abort(403)
    else:
        # Request verified as coming from WeChat
        if request.method == "GET":
            # First-time server verification: echo back echostr
            echostr = request.args.get("echostr")
            if not echostr:
                abort(400)
            return echostr
        elif request.method == "POST":
            # The WeChat server forwards a user message as XML in the body
            xml_str = request.data
            if not xml_str:
                abort(400)

            # Parse the XML payload into a dict
            xml_dict = xmltodict.parse(xml_str)
            xml_dict = xml_dict.get("xml")
            # print(xml_dict)
            # MsgType is the message type
            msg_type = xml_dict.get("MsgType")
            if msg_type == "text":
                # Text message: build the reply the WeChat server relays to
                # the user.  Reply fields:
                #   ToUserName   (required) receiver account (sender's OpenID)
                #   FromUserName (required) developer WeChat account
                #   CreateTime   (required) message creation time (integer)
                #   MsgType      (required) message type
                #   Content      (required) reply content (newlines in
                #                Content render as line breaks in the client)
                user_name = xml_dict.get("FromUserName")
                text = xml_dict.get("Content")
                print("text:", text)
                # Our own message-handling logic produces the reply text
                reply = response(user_name, text)
                resp_dict = {
                    "xml": {
                        "ToUserName": xml_dict.get("FromUserName"),
                        "FromUserName": xml_dict.get("ToUserName"),
                        "CreateTime": int(time.time()),
                        "MsgType": "text",
                        "Content": reply
                    }
                }
            else:
                if msg_type == 'image':
                    # Image message: reply with a fixed media asset
                    msg = parse_message(xml_str)
                    media_id = '6QMxv1WgvAmt_9YJMA9zgmG2QUnr-8M2xPErDHCllWrbvmM_YASURaPS0rTDewta'
                    reply = ImageReply(media_id=media_id, message=msg)
                    xml = reply.render()
                    return xml
                # Any other message type gets a fixed "not understood" text
                resp_dict = {
                    "xml": {
                        "ToUserName": xml_dict.get("FromUserName"),
                        "FromUserName": xml_dict.get("ToUserName"),
                        "CreateTime": int(time.time()),
                        "MsgType": "text",
                        "Content": "对不起,不能识别您发的内容!"
                    }
                }
            # Convert the reply dict to an XML string
            resp_xml_str = xmltodict.unparse(resp_dict)
            # Return the message data to the WeChat server
            return resp_xml_str
def parse_detail_page(self, page, link):
    """Parse a judgement detail page, extract structured fields and write
    them as an XML file under ./result/.  (Python 2 code.)

    Empty pages are requeued; 404 pages are skipped.  A captcha page, if
    served instead of the judgement, is solved via an external service.
    """
    print 3
    # Document id is the last path segment of the URL, e.g.
    # http://openlaw.cn/judgement/ea86414b0cac4075a3b88fcd9f8d4139
    doc_id = link.split('/')[-1]
    d = pq(page)
    # Check whether a captcha challenge was served instead of the page
    try:
        if u'请输入验证码' in d.text():
            print u'等待输入验证码 > '
            imgfoler = '/img/'
            basic = 'img'
            webimage = imgfoler + utility.get_unique_name()
            uniquename = basic + webimage
            # Screenshot the page to <name>_s.png
            self.driver.save_screenshot(uniquename + '_s.png')
            captcha_image = self.driver.find_element_by_xpath(
                '//img[@id="kaptcha"]')
            loc = captcha_image.location
            loc['x'] = int(loc['x'])
            loc['y'] = int(loc['y'])
            # Crop the captcha region out of the screenshot with OpenCV
            image = cv.LoadImage(uniquename + '_s.png', True)
            out = cv.CreateImage((200, 50), image.depth, 3)
            cv.SetImageROI(image, (loc['x'], loc['y'], 200, 50))
            cv.Resize(image, out)
            imgname = uniquename + '.jpg'
            cv.SaveImage(imgname, out)
            # Decode the captcha via the external service
            result = captchaservice.getCaptcha(imgname)
            dictresult = json.loads(result)
            if dictresult.has_key('Error'):
                resultno = 1
                raise Exception('service does not work well !')
            #endif
            code = dictresult['Result']
            # Type the solution and submit the form
            inputkey = self.driver.find_element_by_xpath(
                '//input[@class="search-field"]')
            inputkey.clear()
            inputkey.send_keys(code)
            time.sleep(2)
            searchbtn = self.driver.find_element_by_xpath(
                '//input[@type="submit"]')
            searchbtn.click()
            time.sleep(10)
    except:
        # NOTE(review): bare except silently swallows any captcha failure
        pass
    data = {}
    title = d('h2.entry-title').text()
    if '404' in d('title').text():
        print ' [!] ERROR page, 404 not found, %s' % link
        return
    if not title:
        print ' [!] Empty page, resend %s' % link
        # Page came back empty: requeue the link for another attempt
        self.channel.basic_publish(
            exchange='',
            routing_key='doc_queue',
            body=link,
            properties=pika.BasicProperties(
                delivery_mode=2,  # make the message persistent
            ))
        time.sleep(.5)
        return
    #print title
    print 4
    # Extract structured fields from the sidebar sections
    reason = trim_colon(
        d('aside#sidebar section').eq(0).find('li').filter(
            lambda i: u'案由' in pq(this).text()).text())
    court = trim_colon(
        d('aside#sidebar section').eq(0).find('li').filter(
            lambda i: u'法院' in pq(this).text()).text())
    doc_type = trim_colon(
        d('aside#sidebar section').eq(0).find('li').filter(
            lambda i: u'类型' in pq(this).text()).text())
    status = trim_colon(
        d('aside#sidebar section').eq(0).find('li').filter(
            lambda i: u'程序' in pq(this).text()).text())
    # strip() removes leading/trailing whitespace around the date text
    date = trim_colon(d('li.ht-kb-em-date').text()).strip()
    regx = re.match(r'\d{4}-\d{2}-\d{2}', date)
    if not regx:
        # Fall back to epoch date when no ISO date could be parsed
        date = '1970-01-01'
    case_id = trim_colon(d('li.ht-kb-em-category').text())
    content = d('div#entry-cont').text().strip(u' 允许所有人 查看 该批注 \
 允许所有人 编辑 该批注 取消 保存 Annotate')
    # Parties sidebar
    persons = d('aside#sidebar section').eq(1)
    # Plaintiff side
    accuser = filter_person(persons, [u'原告', u'审请人', u'上诉人', u'再审申请人'])
    # Defendant side
    accused = filter_person(persons, [u'被告', u'被审请人', u'被上诉人'])
    # Presiding judge
    chief_judge = filter_person(persons, [u'审判长'])
    # Lawyers
    lawyers = filter_lawyers(persons)
    data['title'] = title
    data['title_search'] = title
    data['reason'] = reason
    data['court'] = court
    data['date'] = date
    data['doc_type'] = doc_type
    data['status'] = status
    data['content'] = content
    data['case_id'] = case_id
    data['lawyers'] = lawyers
    data['accuser'] = accuser
    data['accused'] = accused
    data['chief_judge'] = chief_judge
    data['url'] = link
    # Import into elasticsearch (disabled)
    #self.es.index(index=ES_INDEX, doc_type=ES_TYPE, id=doc_id, body=data)
    #print 'data',data
    #convertedDict = xmltodict.parse(data);
    realpath = link
    extraction = {}
    extraction['realpath'] = realpath
    extraction['data'] = data
    data1 = {}
    data1['extraction'] = extraction
    # Serialize the extraction wrapper to XML
    convertedXml = xmltodict.unparse(data1)
    # print "convertedXml=",convertedXml;
    try:
        folder = './result/'
        filename = folder + data['case_id'] + '.xml'
        f = open(filename, 'w')
        f.write(convertedXml)
        f.close()
    except:
        # NOTE(review): bare except hides the actual write error
        print 'error...'
def MotecXMLexport(self, rootPath=str, MotecPath=str) -> None: """ Exports all calculated vehilce parameters to a Motec readable XML file :param rootPath: Path to project directory :param MotecPath: Path to Motec project directory :return: """ def write2XML(name, value, unit): BDone = False for exp in doc['Maths']['MathConstants']['MathConstant']: if exp['@Name'] == name: exp['@Value'] = str(value) exp['@Unit'] = unit BDone = True if not BDone: OD = OrderedDict() OD['@Name'] = name OD['@Value'] = str(value) OD['@Unit'] = unit doc['Maths']['MathConstants']['MathConstant'].append(OD) # read default xml with open(rootPath + '/files/default.xml') as fd: doc = xmltodict.parse(fd.read()) # populate struct with new values doc['Maths']['@Id'] = self.carPath doc['Maths']['@Condition'] = '\'Vehicle Id\' == "{}"'.format( self.carPath) # shift RPM if self.UpshiftSettings['BValid']: for i in range(0, self.NGearMax): write2XML('nMotorShiftGear{}'.format(i + 1), self.UpshiftSettings['nMotorShiftOptimal'][i], 'rpm') # gear ratios if self.UpshiftSettings['BValid'] or self.Coasting['BValid']: for i in range(1, self.NGearMax + 1): write2XML('rGear{}'.format(i), self.rGearRatios[i], 'ratio') # coasting if self.Coasting['BValid']: for i in range(0, self.NGearMax + 1): for k in range(0, len(self.Coasting['gLongCoastPolyFit'][i])): write2XML('gLongCoastGear{}Poly{}'.format(i, k), self.Coasting['gLongCoastPolyFit'][i][k], '') # brake line pressure write2XML('pBrakeFMax', self.pBrakeFMax, 'bar') write2XML('pBrakeRMax', self.pBrakeRMax, 'bar') # export xml xmlString = xmltodict.unparse(doc, pretty=True) f = open(MotecPath + '/Maths/{}.xml'.format(self.name), "w") f.write(xmlString) f.close() print( time.strftime("%H:%M:%S", time.localtime()) + ':\tExported Motec XML file: {}.xml'.format(self.name))
def check_redbag(self, content): sql = "select uid,reward_value from reward_record where key_code = %s and status = 0 and item_id = 4" res = self.db.get(sql, content) if not res: log_info(content) return uid = res.get('uid') reward_value = int(res.get('reward_value')) # 单笔低于1元使用企业付款到零钱 print '----------reward value---------------------------------------' print reward_value print type(reward_value) print '-------------------------------------------------' if int(reward_value) < 100: self.enterprise_pay(content, uid, reward_value) return nonce_str = "".join( random.sample('zyxwvutsrqponmlkjihgfedcba123456789', 32)) mch_billno = "".join( random.sample('zyxwvutsrqponmlkjihgfedcba123456789', 28)) mch_id = WECHAT_MCHID wxappid = WECHAT_APP_ID send_name = "东胜游戏" re_openid = self.openid total_amount = reward_value #100 total_num = 1 wishing = "恭喜发财!" client_ip = "120.79.229.113" act_name = "东胜棋牌" remark = "no" #scene_id = "PRODUCT_3" key = WECHAT_KEY send_list = [ 'nonce_str', 'mch_billno', 'mch_id', 'wxappid', 'send_name', 're_openid', 'total_amount', 'total_num', 'wishing', 'client_ip', 'act_name', 'remark', #'scene_id' ] send_list.sort() stringA = "" for val in send_list: stringA += val + "=" + str(eval(val)) + "&" stringA += "key=" + key m = hashlib.md5() m.update(stringA) sign = m.hexdigest().upper() send_list.append('sign') dic = {} for s in send_list: dic[s] = str(eval(s)) dic = {"xml": dic} data = xmltodict.unparse(dic, pretty=True) url = "https://api.mch.weixin.qq.com/mmpaymkttransfers/sendredpack" print data client = AsyncHTTPClient() req = HTTPRequest(url, method="POST", body=data, client_key=self._key_pem, client_cert=self._cert_pem) resp = yield client.fetch(req) print '---------resp body-----------------' print resp.body print '--------------------------' redbag_log(uid, content, resp.body) res_dic = xmltodict.parse(resp.body) self.check_success(res_dic, uid, content)
def parse(msg, line, header_in, opts):
    """Accumulate ASR server messages and, once a partial or complete
    result is present, emit it as an <ASR_output> XML fragment.

    Returns (line, xml_line, done, header): the updated accumulation
    buffer, the XML string ('' when nothing is ready), whether a result was
    emitted, and the current header fields.
    """
    # These variables should be set somewhere else — not needed here for
    # now, but needed for DM:
    # asr_mode = 'utt'
    # asr_nbest = 3

    # Is there a new utterance?
    new_utt = "RESULT:NUM" in msg

    # If yes, then process header to get output info
    if new_utt == True:
        #header = msg.split(',')
        header = re.split('[, \n]', msg)
        # Keep the previous header when this one is malformed
        if not ("RESULT:NUM" in header[0] and len(header) >= 4
                and len(header[3].split("=")) > 1):
            header = header_in
        #if "RESULT:NUM" in header[0]:
        # If idur = 0 then ignore header (same transcription)
        #if header[3].split("=")[1] == "0":
        #    header = header_in
    else:
        header = header_in
        line = line + msg

    xml_line = ''

    # Test if partial or complete results was received
    end_reco = "RESULT:DONE" in line
    part_reco = "RESULT:PART" in line
    done = any([end_reco, part_reco])

    # If it was, then process and output the transcription
    if done:
        tokens = line.split('\n')

        # Process transcription in string format: keep only tokens without
        # protocol punctuation
        line = ''
        for token in tokens:
            if not any((c in ':,-=.') for c in token):
                line = line + token + ' '
        line = line.strip()

        # Process transcription in json format
        asr_data = {}
        #asr_data['content'] = "ASR_output"
        asr_data['language'] = opts['language']
        asr_data['nbest'] = opts['nbest']
        asr_data['mode'] = opts['mode']
        asr_data['partial'] = part_reco
        #asr_data['istart'] = header[4].split("=")[1]
        #asr_data['iend'] = header[5].split("=")[1].strip()
        asr_data['rdur'] = header[2].split("=")[1]
        asr_data['idur'] = header[3].split("=")[1]

        # Split the n-best list: entries alternate "text (id)" pairs
        tokens_nbest = re.split('[(*)]', line)
        transcriptions = []
        if (part_reco == True and opts['mode'] == 'inc') or opts['nbest'] == 1:
            # Single hypothesis: take the first token only
            if not tokens_nbest[0].strip() == "":
                transcriptions.append({
                    'id': '0',
                    'nwords': len(tokens_nbest[0].strip().split(" ")),
                    'text': tokens_nbest[0].strip(),
                })
        else:
            for token_nbest_idx in range(0, len(tokens_nbest) - 1, 2):
                #if not tokens_nbest[token_nbest_idx].strip() == "":
                transcriptions.append({
                    'id': tokens_nbest[token_nbest_idx + 1].strip(),
                    'nwords': len(tokens_nbest[token_nbest_idx].strip().split(" ")),
                    'text': tokens_nbest[token_nbest_idx].strip(),
                })
        asr_data['transcriptions'] = transcriptions

        # Output transcriptions
        if len(transcriptions) > 0:
            #json_line = json.dumps(asr_data, indent=2, sort_keys=False)
            xml_line = xmltodict.unparse({'ASR_output': asr_data},
                                         full_document=False)
            #print(xml_line)

    return line, xml_line, done, header
def forward_request(self, method, path, data, headers):
    """Intercept S3 API calls before they reach the backend.

    Strips streaming-v4 signatures, expands multipart filename
    placeholders, defaults the content-type, records the call for
    persistence, and services the ?notification / ?cors / ?lifecycle
    sub-resources.  Returns True to pass through unchanged, a Request to
    forward modified data, or a Response to short-circuit.
    """
    modified_data = None

    # If this request contains streaming v4 authentication signatures, strip them from the message
    # Related issue: https://github.com/localstack/localstack/issues/98
    # TODO we should evaluate whether to replace moto s3 with scality/S3:
    # https://github.com/scality/S3/issues/237
    if headers.get('x-amz-content-sha256') == 'STREAMING-AWS4-HMAC-SHA256-PAYLOAD':
        modified_data = strip_chunk_signatures(data)

    # POST requests to S3 may include a "$(unknown)" placeholder in the
    # key, which should be replaced with an actual file name before storing.
    if method == 'POST':
        original_data = modified_data or data
        expanded_data = multipart_content.expand_multipart_filename(original_data, headers)
        if expanded_data is not original_data:
            modified_data = expanded_data

    # If no content-type is provided, 'binary/octet-stream' should be used
    # src: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html
    if method == 'PUT' and not headers.get('content-type'):
        headers['content-type'] = 'binary/octet-stream'

    # persist this API call to disk
    persistence.record('s3', method, path, data, headers)

    parsed = urlparse.urlparse(path)
    query = parsed.query
    path = parsed.path
    # Bucket name is the first path segment
    bucket = path.split('/')[1]
    query_map = urlparse.parse_qs(query)
    if query == 'notification' or 'notification' in query_map:
        response = Response()
        response.status_code = 200
        if method == 'GET':
            # TODO check if bucket exists
            result = '<NotificationConfiguration xmlns="%s">' % XMLNS_S3
            if bucket in S3_NOTIFICATIONS:
                notif = S3_NOTIFICATIONS[bucket]
                # Emit one <*Configuration> element per configured destination
                for dest in NOTIFICATION_DESTINATION_TYPES:
                    if dest in notif:
                        dest_dict = {
                            '%sConfiguration' % dest: {
                                'Id': uuid.uuid4(),
                                dest: notif[dest],
                                'Event': notif['Event'],
                                'Filter': notif['Filter']
                            }
                        }
                        result += xmltodict.unparse(dest_dict, full_document=False)
            result += '</NotificationConfiguration>'
            response._content = result

        if method == 'PUT':
            parsed = xmltodict.parse(data)
            notif_config = parsed.get('NotificationConfiguration')
            # Replace any previous configuration for this bucket
            S3_NOTIFICATIONS.pop(bucket, None)
            for dest in NOTIFICATION_DESTINATION_TYPES:
                config = notif_config.get('%sConfiguration' % (dest))
                if config:
                    events = config.get('Event')
                    if isinstance(events, six.string_types):
                        events = [events]
                    event_filter = config.get('Filter', {})
                    # make sure FilterRule is an array
                    s3_filter = _get_s3_filter(event_filter)
                    if s3_filter and not isinstance(s3_filter.get('FilterRule', []), list):
                        s3_filter['FilterRule'] = [s3_filter['FilterRule']]
                    # create final details dict
                    notification_details = {
                        'Id': config.get('Id'),
                        'Event': events,
                        dest: config.get(dest),
                        'Filter': event_filter
                    }
                    # TODO: what if we have multiple destinations - would we overwrite the config?
                    S3_NOTIFICATIONS[bucket] = clone(notification_details)
        # return response for ?notification request
        return response

    if query == 'cors' or 'cors' in query_map:
        if method == 'GET':
            return get_cors(bucket)
        if method == 'PUT':
            return set_cors(bucket, data)
        if method == 'DELETE':
            return delete_cors(bucket)

    if query == 'lifecycle' or 'lifecycle' in query_map:
        if method == 'GET':
            return get_lifecycle(bucket)
        if method == 'PUT':
            return set_lifecycle(bucket, data)

    # Forward modified payloads; True means pass through unchanged
    if modified_data:
        return Request(data=modified_data, headers=headers, method=method)
    return True
def get():
    """Handle WeChat server callbacks: echo verification on GET, and a
    text-command dispatcher (stock subscriptions, binding, downloads) on
    POST."""
    if request.method == "GET":  # GET request: server verification
        my_echostr = request.args.get('echostr')  # echo back the echostr parameter
        return my_echostr
    else:
        # POST: the WeChat server forwards a user message as XML
        xml_str = request.data
        if not xml_str:
            return ""
        resp_dict = None
        re_content = "信息错误"
        # Parse the XML payload into a dict
        xml_dict = xmltodict.parse(xml_str)
        xml_dict = xml_dict.get("xml")
        # Extract the message type
        msg_type = xml_dict.get("MsgType")
        if msg_type == "text":  # Text message: dispatch on content
            content = xml_dict.get("Content")
            if content == "清除缓存":
                re_content = "缓存已清除"
            elif content == "查询已订阅股票":
                # Look up the user's subscriptions by their WeChat id
                result = user_db_sheet.find(filter={'wechat': xml_dict.get("FromUserName")})
                if result:
                    re_content = str(result[0]['stocks'])
                else:
                    re_content = "尚未绑定微信"
            elif re.fullmatch(r'\d{6}\.\w{2}', content):
                # Bare stock code like 000001.SZ
                re_content = "code: " + content
            elif content.startswith("查询 "):
                try:
                    datas = content.split(" ")
                    stock_id = datas[1]
                    result = user_db_sheet.find(filter={'wechat': xml_dict.get("FromUserName")})
                    if result:
                        user_name = result[0]['_id']
                        re_len = send_one(result[0], stock_id)
                        re_content = "发送成功: {} {} {}".format(user_name, stock_id, re_len)
                    else:
                        re_content = "尚未绑定微信"
                except Exception as e:
                    re_content = "发送失败:" + str(e)
            elif content.startswith("绑定 "):
                datas = content.split(" ")
                user_name = datas[1]
                # The binding logic here needs more careful thought
                if user_db_sheet.find(filter={'_id': user_name}):
                    re_content = "您要绑定的用户名:{},已被人绑定!请联系微信435878393".format(user_name)
                elif user_db_sheet.find(filter={'wechat': xml_dict.get("FromUserName")}):
                    re_content = "您的微信已被绑定!请联系微信435878393"
                else:
                    if user_db_sheet.insert({'_id': user_name, 'wechat': xml_dict.get("FromUserName")}):
                        re_content = "绑定成功"
                    else:
                        re_content = "绑定失败"
            elif content.startswith("订阅 "):
                # Add a stock id to the user's subscription set
                datas = content.split(" ")
                stock_id = datas[1]
                result = user_db_sheet.find(filter={'wechat': xml_dict.get("FromUserName")})
                if result:
                    data = result[0]
                    if 'stocks' not in data.keys():
                        data['stocks'] = []
                    stocks = set(data['stocks'])
                    stocks.add(stock_id)
                    user_db_sheet.update_one(filter={'wechat': xml_dict.get("FromUserName")},
                                             update={'$set': {'stocks': list(stocks)}})
                    re_content = "订阅成功"
                else:
                    re_content = "尚未绑定微信"
            elif content.startswith("取消订阅 "):
                # Remove a stock id from the user's subscription set
                datas = content.split(" ")
                stock_id = datas[1]
                result = user_db_sheet.find(filter={'wechat': xml_dict.get("FromUserName")})
                if result:
                    data = result[0]
                    if 'stocks' not in data.keys():
                        data['stocks'] = []
                    stocks = set(data['stocks'])
                    if stock_id in stocks:
                        stocks.remove(stock_id)
                        user_db_sheet.update_one(filter={'wechat': xml_dict.get("FromUserName")},
                                                 update={'$set': {'stocks': list(stocks)}})
                        re_content = "取消订阅成功"
                    else:
                        re_content = f"尚未订阅{stock_id}"
                else:
                    re_content = "尚未绑定微信"
            elif content.startswith("下载 "):
                # Queue a download URL with aria2 and report its status
                url = content[3:]
                download = aria2.add(url)[0]
                re_content = download.status
            else:
                # Unknown command: echo the message back
                re_content = content
        if not resp_dict:
            # Build the reply sent back to the user via the WeChat server
            resp_dict = {
                "xml": {
                    "ToUserName": xml_dict.get("FromUserName"),
                    "FromUserName": xml_dict.get("ToUserName"),
                    "CreateTime": int(time.time()),
                    "MsgType": "text",
                    "Content": re_content,
                }
            }
        # Convert the reply dict to an XML string
        resp_xml_str = xmltodict.unparse(resp_dict)
        # Return the message data to the WeChat server
        return resp_xml_str
def forward_request(self, method, path, data, headers): parsed_url = urlparse(path) action = parsed_url.path.split('/')[2] is_associate = path.endswith('/associatevpc') if is_associate or path.endswith('/disassociatevpc'): path_parts = path.split('/') zone_id = path_parts[3] req_data = xmltodict.parse(to_str(data)) region_details = Route53Backend.get() zone_details = region_details.vpc_hosted_zone_associations.get( zone_id) or [] if is_associate: assoc_id = short_uid() zone_data = req_data.get('AssociateVPCWithHostedZoneRequest', {}) zone_data['Id'] = assoc_id zone_data['HostedZoneId'] = zone_id zone_details.append(zone_data) response_entry = { 'ChangeInfo': { 'Id': assoc_id, 'Status': 'INSYNC', 'SubmittedAt': timestamp_millis() } } else: def _match(z): return z['HostedZoneId'] == zone_id and z['VPC'][ 'VPCId'] == zone_data['VPC']['VPCId'] zone_data = req_data.get( 'DisassociateVPCFromHostedZoneRequest', {}) response_entry = [z for z in zone_details if _match(z)] zone_details = [z for z in zone_details if not _match(z)] if not response_entry: return 404 response_entry = response_entry[0] region_details.vpc_hosted_zone_associations[zone_id] = zone_details response_tag = '%sVPCWithHostedZoneResponse' % ( 'Associate' if is_associate else 'Disassociate') response = {response_tag: response_entry} body = xmltodict.unparse(response) response = requests_response(body) return response if action == 'change': if method == 'GET': resource_id = parsed_url.path.split('/')[-1] change_response = { 'GetChangeResponse': { 'ChangeInfo': { 'Id': resource_id, 'Status': 'INSYNC', 'SubmittedAt': timestamp_millis() } } } body = xmltodict.unparse(change_response) response = requests_response(body) return response return True
def metadata_to_xml(identifier, title, creator, publisher, publisher_year,
                    **kwargs):
    '''Pass in variables and return XML in the format ready to send to
    DataCite API

    :param identifier: DOI
    :param title: A descriptive name for the resource
    :param creator: The author or producer of the data. There may be
        multiple Creators, in which case they should be listed in order of
        priority
    :param publisher: The data holder. This is usually the repository or
        data centre in which the data is stored
    :param publisher_year: The year when the data was (or will be) made
        publicly available.
    :param kwargs: optional metadata (subject, description, size, format,
        version, rights, geo_point, geo_box, resource_type, language)
    :return: XML string (no XML declaration, pretty-printed)
    '''

    # Make sure a var is a list so we can easily loop through it
    # Useful for properties where multiple values are optional
    def _ensure_list(var):
        '''
        :param var:
        '''
        return var if isinstance(var, list) else [var]

    # Encode title ready for posting
    title = title.encode(u'unicode-escape')

    # Optional metadata properties
    subject = kwargs.get(u'subject')
    # BUG FIX: description is optional, but the original called .encode()
    # unconditionally, raising AttributeError when it was absent (None).
    description = kwargs.get(u'description')
    if description:
        description = description.encode(u'unicode-escape')
    size = kwargs.get(u'size')
    format = kwargs.get(u'format')
    version = kwargs.get(u'version')
    rights = kwargs.get(u'rights')
    geo_point = kwargs.get(u'geo_point')
    geo_box = kwargs.get(u'geo_box')

    # Optional metadata properties, with defaults
    resource_type = kwargs.get(u'resource_type', u'Dataset')
    language = kwargs.get(u'language', u'eng')

    # Create basic metadata with mandatory metadata properties
    xml_dict = {
        u'resource': {
            u'@xmlns': u'http://datacite.org/schema/kernel-3',
            u'@xmlns:xsi': u'http://www.w3.org/2001/XMLSchema-instance',
            u'@xsi:schemaLocation': u'http://datacite.org/schema/kernel-3 '
                                    u'http://schema.datacite.org/meta/kernel-3/metadata.xsd',
            u'identifier': {
                u'@identifierType': u'DOI',
                u'#text': identifier
            },
            u'titles': {
                u'title': {
                    u'#text': title
                }
            },
            u'creators': {
                u'creator': [{
                    u'creatorName': c.encode(u'unicode-escape')
                } for c in _ensure_list(creator)],
            },
            u'publisher': publisher,
            u'publicationYear': publisher_year,
        }
    }

    # Add each optional property only when it was supplied
    if subject:
        xml_dict[u'resource'][u'subjects'] = {
            u'subject': [c for c in _ensure_list(subject)]
        }
    if description:
        xml_dict[u'resource'][u'descriptions'] = {
            u'description': {
                u'@descriptionType': u'Abstract',
                u'#text': description
            }
        }
    if size:
        xml_dict[u'resource'][u'sizes'] = {u'size': size}
    if format:
        xml_dict[u'resource'][u'formats'] = {u'format': format}
    if version:
        xml_dict[u'resource'][u'version'] = version
    if rights:
        xml_dict[u'resource'][u'rightsList'] = {u'rights': rights}
    if resource_type:
        xml_dict[u'resource'][u'resourceType'] = {
            u'@resourceTypeGeneral': u'Dataset',
            u'#text': resource_type
        }
    if language:
        xml_dict[u'resource'][u'language'] = language
    if geo_point:
        xml_dict[u'resource'][u'geoLocations'] = {
            u'geoLocation': {
                u'geoLocationPoint': geo_point
            }
        }
    if geo_box:
        # NOTE(review): when both geo_point and geo_box are supplied, the
        # box replaces the point entry (pre-existing behaviour, kept as-is).
        xml_dict[u'resource'][u'geoLocations'] = {
            u'geoLocation': {
                u'geoLocationBox': geo_box
            }
        }

    # Allow plugins to extend/override the metadata before serialization
    for plugin in PluginImplementations(IDoi):
        xml_dict = plugin.metadata_to_xml(xml_dict, kwargs)

    return unparse(xml_dict, pretty=True, full_document=False)
def test_root(self):
    """A single root with a None value must survive an unparse/parse round trip."""
    original = {'a': None}
    # Dict -> XML -> dict must reproduce the original mapping.
    self.assertEqual(original, parse(unparse(original)))
    # XML -> dict -> XML must be a fixed point.
    rendered = unparse(original)
    self.assertEqual(rendered, unparse(parse(rendered)))
def createJob(self, id, app_spec, overwrite=False, skip_image_push=False):
    """Create (or reconfigure) a Jenkins pipeline job that builds the app in app_spec.

    :param id: Jenkins job id/path to create or reconfigure
    :param app_spec: dict describing the app (version, source, testing, name, ...)
    :param overwrite: if True, reconfigure an existing job instead of creating one
    :param skip_image_push: if True, omit registry login and the --push flag
    :returns: the created job's config XML, or 1 (unreachable in practice,
        since the wait loop either returns or raises)
    :raises Exception: on missing/invalid spec fields, template substitution
        failure, or timeout waiting for the job to appear
    """
    # format https://github.com/user/repo.git#v1.0
    version = app_spec["version"]
    source = app_spec.get("source", None)
    if not source:
        raise Exception("field source empty")

    ###
    # Defaults: a no-op test command, and no entrypoint override.
    run_test = "\'echo No test defined\'"
    run_entrypoint = ""
    test = app_spec.get("testing")
    if test:
        test_command = test.get("command")
        # mask_entrypoint clears the image entrypoint so the test command runs bare.
        if "mask_entrypoint" in test.keys() and test.get(
                "mask_entrypoint"):
            run_entrypoint = ' --entrypoint=\'\''
        #if entrypoint_command:
        #    all_entrypoint_command = " ".join(entrypoint_command)
        #    run_entrypoint = "\'" + all_entrypoint_command + "\'"
        if test_command:
            all_test_command = " ".join(test_command)
            run_test = "\'" + all_test_command + "\'"
    # t = " \' rm -rf \' "
    #sourceArray = source.split("#", 3)

    # Resolve the git ref: prefer a tag (as refs/tags/...), fall back to branch.
    git_url = source.get("url", "")
    git_branch = source.get("tag", "")
    if git_branch == None or git_branch == "":
        git_branch = source.get("branch", "")
        if git_branch == None or git_branch == "":
            raise Exception("neither tag nor branch specified")
    else:
        git_branch = f"refs/tags/{git_branch}"

    # Normalize the in-repo build directory (no leading slash).
    git_directory = source.get("directory", ".")
    if git_directory.startswith("/"):
        git_directory = git_directory[1:]
    git_dockerfile = source.get("dockerfile", "./Dockerfile")

    # Target platforms for the multi-arch build (required).
    platforms = source.get("architectures", [])
    if len(platforms) == 0:
        raise Exception("No architectures specified")
    platforms_str = ",".join(platforms)
    platforms_list = " ".join(platforms)

    # Collect docker --build-arg flags, plus any globally configured extras.
    build_args = source.get("build_args", {})
    build_args_command_line = ""
    for key in build_args:
        value = build_args[key]
        build_args_command_line += f" --build-arg {key}={value}"
    if docker_build_args != "":
        build_args_command_line += f" {docker_build_args}"

    # Image namespace: explicit "namespace" wins, otherwise the spec owner.
    actual_namespace = ""
    namespace = app_spec.get("namespace", "")
    if len(namespace) > 0:
        actual_namespace = namespace
    else:
        actual_namespace = app_spec.get("owner", "")

    # The registry user credentials are defined in the casc_jenkins.yaml file.
    docker_login = '''withCredentials([usernamePassword(credentialsId: 'registry-user', passwordVariable: 'REGISTRY_USER_PWD', usernameVariable: 'REGISTRY_USERNAME')]) {
        sh 'echo $REGISTRY_USER_PWD | docker login -u $REGISTRY_USERNAME --password-stdin ''' + docker_registry_url + ''''
    }
    '''
    do_push = "--push"
    if skip_image_push:
        docker_login = ""
        do_push = ""

    name = app_spec["name"]

    # Assemble the Jenkinsfile template; the test stage is optional.
    jenkinsfileTemplate = ""
    if test:
        jenkinsfileTemplate = jenkinsfileTemplatePrefix + jenkinsfileTemplateTestStage + jenkinsfileTemplateSuffix
    else:
        jenkinsfileTemplate = jenkinsfileTemplatePrefix + jenkinsfileTemplateSuffix
    template = Template(jenkinsfileTemplate)
    try:
        jenkinsfile = template.substitute(
            url=git_url,
            branch=git_branch,
            directory=git_directory,
            dockerfile=git_dockerfile,
            namespace=actual_namespace,
            name=name,
            version=version,
            platforms=platforms_str,
            build_args_command_line=build_args_command_line,
            docker_run_args=docker_run_args,
            docker_registry_url=docker_registry_url,
            docker_login=docker_login,
            command=run_test,
            platforms_list=platforms,
            platform=platforms_str,
            entrypoint=run_entrypoint,
            do_push=do_push)
    except Exception as e:
        raise Exception(
            f'template failed: url={git_url}, branch={git_branch}, directory={git_directory}, e={str(e)}'
        )

    #print(jenkins.EMPTY_CONFIG_XML)
    # Wrap the Jenkinsfile in a pipeline-job config document and serialize it.
    newJob = createPipelineJobConfig(jenkinsfile, f'{actual_namespace}/{name}')
    print(newJob)
    newJob_xml = xmltodict.unparse(newJob)  #.decode("utf-8")
    #print("------")
    #print(newJob_xml)
    #print("------")
    #print(jenkins.EMPTY_CONFIG_XML)
    #print("------")

    if overwrite:
        self.server.reconfig_job(id, newJob_xml)
    else:
        self.server.create_job(id, newJob_xml)

    # Poll until Jenkins reports the job config (creation is asynchronous),
    # giving up after ~10 seconds.
    timeout = 10
    while True:
        try:
            my_job = self.server.get_job_config(id)
            return my_job
        except jenkins.NotFoundException as e:  # pragma: no cover
            pass
        except Exception as e:  # pragma: no cover
            raise
        if True:  # pragma: no cover
            time.sleep(2)
            timeout -= 2
            if timeout <= 0:
                raise Exception(f'timout afer job creation')
            continue
    return 1
# Export phase (Python 2 script): by this point the target dictionary
# `out_dict` has been fully built by earlier code.
if verbose:
    print "Successfully created the target dictionary."

### write to the output file
if verbose:
    print "Exporting the dictionary to " + (outfile if not tostdout else "standard output") + "..."
# Either open the named file as UTF-8, or reuse `outfile` directly —
# presumably an already-open stream (stdout) when tostdout is set; confirm.
if not tostdout:
    out_fd = codecs.open(outfile, "w", "utf8")
else:
    out_fd = outfile
if testing:
    print out_dict
# Serialize the whole dictionary to pretty-printed XML in one pass.
out_xml = xmltodict.unparse(out_dict, encoding='utf-8', pretty=True, indent=' ')
del out_dict  # we do not need this henceforth
if testing:
    print out_xml
try:
    out_fd.write(codecs.encode(out_xml, "utf8"))
finally:
    # NOTE(review): the success message is printed even when write() raised,
    # because it lives in the finally block — confirm that is intended.
    if verbose:
        print "Successfully exported " + str(obtained_size) + " records."
    out_fd.close()
exit(0)
def test_simple_cdata(self):
    """A root element with simple text content must round-trip losslessly."""
    original = {'a': 'b'}
    # Dict -> XML -> dict reproduces the mapping.
    rebuilt = parse(unparse(original))
    self.assertEqual(original, rebuilt)
    # A second unparse of the parsed document yields identical XML.
    xml_once = unparse(original)
    self.assertEqual(xml_once, unparse(parse(xml_once)))
def update_filter(self, xml, content_id, filter_id):
    """PUT an updated filter definition for a content item.

    :param xml: dict to serialize as the XML request body
    :param content_id: id of the content item owning the filter
    :param filter_id: id of the filter to update
    """
    endpoint = '/contents/%s/filters/%s' % (content_id, filter_id)
    # Serialize the dict payload to XML before sending.
    body = xmltodict.unparse(xml)
    self.do_request('put', endpoint, data=body)
def post(self): xml_data = self.request.body #将xml文件格式的数据转化为python字典类型的数据 dict_data = xmltodict.parse(xml_data) #获取消息类型 msg_type = dict_data["xml"]["MsgType"] #如果是文本类型,则返回发送过来的数据 if msg_type == "text": content = dict_data["xml"]["Content"] print "-----" resp_data = { "xml": { "ToUserName": dict_data["xml"]["FromUserName"], "FromUserName": dict_data["xml"]["ToUserName"], "CreateTime": int(time.time()), "MsgType": "text", "Content": content, } } #把字典数据转化成xml格式的数据 self.write(xmltodict.unparse(resp_data)) elif msg_type == "voice": recognition = dict_data["xml"].get("Recognition", u'未识别') print dict_data["xml"] resp_data = { "xml": { "ToUserName": dict_data["xml"]["FromUserName"], "FromUserName": dict_data["xml"]["ToUserName"], "CreateTime": int(time.time()), "MsgType": "text", "Content": recognition, } } self.write(xmltodict.unparse(resp_data)) elif msg_type == "event": if dict_data["xml"]["Event"] == "subscribe": resp_data = { "xml": { "ToUserName": dict_data["xml"]["FromUserName"], "FromUserName": dict_data["xml"]["ToUserName"], "CreateTime": int(time.time()), "MsgType": "text", "Content": u"哈喽,我是洪山,感谢关注!", } } if "EventKey" in dict_data: #用户未关注时,进行关注后的事件推送 eventkey = dict_data["xml"]["EventKey"] sid = eventkey[8:] resp_data["xml"]["Content"] = u"你关注了我哦,%s" % sid self.write(xmltodict.unparse(resp_data)) elif dict_data["xml"]["Event"] == "SCAN": # sid = dict_data["xml"]["EventKey"] resp_data = { "xml": { "ToUserName": dict_data["xml"]["FromUserName"], "FromUserName": dict_data["xml"]["ToUserName"], "CreateTime": int(time.time()), "MsgType": "text", "Content": u"哈喽,你扫描关注了我,%s" % sid } } self.write(xmltodict.unparse(resp_data)) #如果不是文本数据则反回“哈哈,你很皮” else: print "----------------------------------------" resp_data = { "xml": { "ToUserName": dict_data["xml"]["FromUserName"], "FromUserName": dict_data["xml"]["ToUserName"], "CreateTime": int(time.time()), "MsgType": "text", "Content": u"哈哈哈,你很皮欸", } } self.write(xmltodict.unparse(resp_data))
import xmltodict

# Read the launch file and parse the XML into an (ordered) dict.
# BUG FIX: the file handle was previously opened and never closed; a context
# manager releases it deterministically.
with open('ros.launch', 'r') as ros_launch:
    entrada = xmltodict.parse(ros_launch.read())

# Override each <arg default="..."> whose name matches a key in configDefault
# (configDefault: mapping of sections -> {arg_name: new_default} — TODO confirm).
for item in entrada['launch']['arg']:
    for v in configDefault.values():
        for j, i in v.items():
            if item['@name'] == j:
                item['@default'] = i

# for item in [entrada['launch']['arg'],]:
#     if item['@name'] == 'rpm':
#         item['@default'] = '300'

# Serialize the modified document back to XML and show it.
entrada = xmltodict.unparse(entrada)
print(entrada)

# response = urllib3.urlopen(Base_URL + 'status.json')
# if response:
#     status = json.loads(response.read())
#     print (status)
#     ('Motor: %s \nAtividade do Sensor: %s \nTipo de retorno: %s \n'\
#     'Inicio FOV: %s \nTermino FOV: %s \n' % (status['motor']['rpm'], status['laser']['state'],
#     status['return']['...'], status['start']['...'], status['end']['...']))
# sensor.close()
# Ta melhorando :)
import xmltodict

# Minimal xmltodict round-trip demo: XML -> dict -> XML.
# Adjacent string literals concatenate into one XML document.
xml = "<xml>" \
      "<name>zhangsan</name>" \
      "<age>18</age>" \
      "</xml>"

# BUG FIX: the parse result was previously bound to the name `dict`,
# shadowing the builtin; renamed to `parsed`.
parsed = xmltodict.parse(xml)
print(parsed)

xml2 = xmltodict.unparse(parsed)
print(xml2)
def write_xml(file_path, data):
    """Serialize *data* as pretty-printed XML straight into *file_path*."""
    with open(file_path, 'w') as handle:
        # Stream the output directly to the file object instead of
        # building the string in memory first.
        xmltodict.unparse(data, output=handle, pretty=True)
def ordered_dump(data, *args, **kwargs):
    """Render *data* as XML via xmltodict.unparse, forwarding extra options."""
    dump = xmltodict.unparse
    return dump(data, *args, **kwargs)
# print(xml) finally: with codecs.open(rss_name, 'w', 'utf-8') as f: f.write(xml) return True categorie_rss = {'rss': {'categorie': {'item': []}}} for categorie, data in categories.items(): rss_name = 'youporn_{}_rss.xml'.format(categorie) state = down_youporn_categorie_xml(categorie, data['url'], rss_name) data['state'] = state data['rss'] = rss_name if (state == True): categorie_data = {'title': None, 'link': None} categorie_data['title'] = categorie categorie_data['link'] = "{}/{}".format(youporn_head, rss_name) categorie_rss['rss']['categorie']['item'].append(categorie_data) time.sleep(2.5) print(categorie_rss) xml = '' try: xml = xmltodict.unparse(categorie_rss, encoding='utf-8') print(xml) # except: # xml = xmltodict.unparse({'request': channel}, encoding='utf-8') # print(xml) finally: with codecs.open('youporn_categorie_rss.xml', 'w', 'utf-8') as f: f.write(xml)
def scan(self, url=None, username=None, password=None, resolution=None,
         size=None, compression=None, brightness=None, contrast=None,
         color_mode=None, document_format_ext=None, **kwargs):
    """Submit an eSCL scan job and fetch the resulting document.

    Every parameter left as None falls back to the attribute of the same name
    on self; extra **kwargs are forwarded to the HTTP post/get calls.
    Returns the response of GET <job location>/NextDocument.
    """
    # Fall back to instance-level defaults for any unspecified option.
    if url is None:
        url = self.url
    if resolution is None:
        resolution = self.resolution
    if size is None:
        size = self.size
    if compression is None:
        compression = self.compression
    if brightness is None:
        brightness = self.brightness
    if contrast is None:
        contrast = self.contrast
    if color_mode is None:
        color_mode = self.color_mode
    if document_format_ext is None:
        document_format_ext = self.document_format_ext
    if username is None:
        username = self.username
    if password is None:
        password = self.password

    # Build the eSCL ScanSettings XML payload.
    # NOTE(review): SIZES[size] appears to hold page dimensions in inches,
    # scaled by dpi to pixels — index 0 used as height, 1 as width; confirm.
    data = unparse({
        'scan:ScanSettings': {
            '@xmlns:scan': 'http://schemas.hp.com/imaging/escl/2011/05/03',
            '@xmlns:copy': 'http://www.hp.com/schemas/imaging/con/copy/2008/07/07',
            '@xmlns:dd': 'http://www.hp.com/schemas/imaging/con/dictionaries/1.0/',
            '@xmlns:dd3': 'http://www.hp.com/schemas/imaging/con/dictionaries/2009/04/06',
            '@xmlns:fw': 'http://www.hp.com/schemas/imaging/con/firewall/2011/01/05',
            '@xmlns:scc': 'http://schemas.hp.com/imaging/escl/2011/05/03',
            '@xmlns:pwg': 'http://www.pwg.org/schemas/2010/12/sm',
            'pwg:Version': '2.1',
            'scan:Intent': 'Document',
            'pwg:ScanRegions': {
                'pwg:ScanRegion': {
                    'pwg:Height': int(SIZES[size][0] * resolution),
                    'pwg:Width': int(SIZES[size][1] * resolution),
                    'pwg:XOffset': '0',
                    'pwg:YOffset': '0',
                },
            },
            'pwg:InputSource': 'Platen',
            'scan:DocumentFormatExt': document_format_ext,
            'scan:XResolution': resolution,
            'scan:YResolution': resolution,
            'scan:ColorMode': color_mode,
            'scan:CompressionFactor': compression,
            'scan:Brightness': brightness,
            'scan:Contrast': contrast,
        },
    })

    # Only send credentials when a password is configured.
    auth = (username, password) if password else None
    response = self.post(url + '/eSCL/ScanJobs', auth=auth, data=data,
                         allow_redirects=False, **kwargs)

    # The Location header may point at a different host/scheme; rebase its
    # path onto the scanner's own scheme+netloc before following it.
    location = urlunsplit(
        urlsplit(url)[:2] + urlsplit(response.headers['Location'])[2:])
    return self.get(location + '/NextDocument', auth=auth,
                    **kwargs)
def test_generator(self):
    """Generator values must unparse the same as the equivalent list."""
    expected = {'a': {'b': ['1', '2', '3']}}

    def lazy_obj():
        # Fresh generator each call — generators are single-use.
        return {'a': {'b': (i for i in ('1', '2', '3'))}}

    # Parsing the unparsed generator form yields the list form.
    self.assertEqual(expected, parse(unparse(lazy_obj())))
    # Round-tripping through parse produces identical XML.
    rendered = unparse(lazy_obj())
    self.assertEqual(rendered, unparse(parse(unparse(lazy_obj()))))