def compare_sets(file_list):
    """Diff pairs of Ableton Live set XML files and print what changed.

    For each (past, current) pair from comparison_tuples(), both XML documents
    are converted to dicts and their 'Tracks' subtrees compared with
    DictDiffer; added/removed/changed keys are printed as JSON.

    Fix: the loop variable shadowed the builtin ``tuple``; prints are now the
    single-argument form that behaves the same under Python 2 and 3.
    """
    comparison_list = comparison_tuples(file_list)
    for pair in comparison_list:  # renamed from 'tuple' (shadowed the builtin)
        print(pair)
        past_xml, current_xml = open_xml(pair)
        past_dict = json.loads(xml2json(past_xml, 0, 1))
        current_dict = json.loads(xml2json(current_xml, 0, 1))
        differ = DictDiffer(current_dict['Ableton']['LiveSet']['Tracks'],
                            past_dict['Ableton']['LiveSet']['Tracks'])
        print("ADDED")
        print(json.dumps(differ.added(), indent=4))
        print("REMOVED")
        print(json.dumps(differ.removed(), indent=4))
        print("CHANGED")
        print(json.dumps(differ.changed(), indent=4))
def TestResume(self):
    """Send a base64-encoded resume to the RChilli SOAP parser and verify a reply.

    Fix: the local variable ``json`` shadowed the json module; renamed to
    ``json_output``. ``result != None`` tightened to ``is not None``.
    """
    with open(self.resumeFilePath, "rb") as resume_file:
        resume_encoded_string = base64.b64encode(resume_file.read())  # read file
    client = Client(self.RChilliApiUrl)  # init soap client
    print(client)
    result = client.service.parseResumeBinary(resume_encoded_string,
                                              self.resumeFilePath,
                                              self.RChilliUserKey,
                                              self.RChilliVersion,
                                              self.RChilliSubuserID)
    result = result.encode('ascii', 'ignore').decode('ascii')
    options = optparse.Values({"pretty": False})
    strip_ns = 1
    json_output = xml2json(result, options, strip_ns)  # was 'json': shadowed the module
    print(json_output)
    self.assertEqual(True, result is not None)
def xml_collections_endpoint(**lookup):
    """Eve collections endpoint with XML support.

    For */xml content types, POST bodies are converted from XML to JSON before
    insertion and GET responses are re-serialized as a <gdxp> XML document.
    Other content types fall through to the stock collections_endpoint.

    Fix: the local ``l`` was an ambiguous single-letter name (PEP 8 / E741);
    dead commented-out code removed.
    """
    resource = _resource()
    response = None
    method = request_method()
    if request.content_type.endswith("/xml"):
        if method == "POST":
            response = post_internal(resource, payl=xml2json(request.data),
                                     skip_validation=True)
        elif method == "GET":
            response = collections_endpoint(**lookup)
            items = json.loads(response.data.decode('utf-8'))['_items']
            response.data = xmltodict.unparse({
                'gdxp': {"supplier": list(map(popper, items))}
            })
        else:
            raise NotImplementedError('Not implemented')
        return send_response(resource, response)
    else:
        return collections_endpoint(**lookup)
def get(self):
    """Look up a book by EAN/ISBN via the Amazon Product API and emit JSON.

    Fix: ``yaml.load`` without a loader can construct arbitrary objects;
    ``yaml.safe_load`` is sufficient for a plain credentials file.
    """
    if not self.request.get('ean'):
        return
    id_type = 'EAN'
    ean = self.request.get('ean')
    if len(ean) < 13:
        id_type = 'ISBN'  # EANs are 13 digits; anything shorter is treated as ISBN
    with open("amazon.yml", "r") as config_file:
        amazon_credentials = yaml.safe_load(config_file)
    amazon = amazonproduct.api.API(
        *[amazon_credentials.get(k)
          for k in ("access_key", "secret_key", "locale", "associate_tag")],
        processor=string_response_parser)
    node = amazon.item_lookup(ean, SearchIndex='Books', IdType=id_type,
                              ResponseGroup="Small,Reviews,Images")
    self.response.headers.add_header('Content-Type', "application/json; charset=utf-8")
    self.response.headers.add_header('Connection', "close")
    js_string = xml2json.xml2json(node)
    # The xml tree will have the schema url in the tags, which we don't want
    js_string = re.sub(r'\{http.*?\}', '', js_string)
    self.response.out.write(js_string)
    logging.info(type(self.response))
    logging.info(type(self.response.out))
def TestResume(self):
    """Parse a resume through the HireAbility SOAP service and verify a reply.

    Fix: ``!= None`` replaced with ``is not None``; prints normalized to the
    single-argument form valid under Python 2 and 3.
    """
    with open(self.resumeFilePath, "rb") as resume_file:
        resume_encoded_string = base64.b64encode(resume_file.read())  # read file
    client = Client(self.HireAbilityApiUrl)  # init soap client
    print(client)
    xmlResult = client.service.ParseDocNew(self.HireAbilityProductKey,
                                           self.resumeFilePath,
                                           resume_encoded_string,
                                           "", "", "", "")  # get response
    dictResult = xmltodict.parse(xmlResult)  # dictionary
    jsonResult = json.dumps(dictResult)  # json
    print(jsonResult)
    xmlResult = xmlResult.encode('ascii', 'ignore').decode('ascii')
    print(xmlResult)
    options = optparse.Values({"pretty": False})
    strip_ns = 1
    jsonoutput = xml2json(xmlResult, options, strip_ns)
    print(jsonoutput)
    self.assertEqual(True, xmlResult is not None)
def xmltojson(xmlstr):
    """Convert an XML string to a Python dict.

    Fix: the JSON text returned by xml2json was parsed with
    ``ast.literal_eval``, which rejects valid JSON tokens such as
    ``true``/``false``/``null``; ``json.loads`` is the correct parser.
    """
    jsonstr = xml2json.xml2json(xmlstr)
    return json.loads(jsonstr)
async def post(self):
    """WeChat event callback: on subscribe/SCAN, greet the user and bind the cashier."""
    body = self.request.body
    payload = json.loads(
        xml2json.xml2json(body.decode(), optparse.Values({'pretty': True})))
    logging.error(payload)
    payload = payload.get('xml', {})
    event = payload.get('Event', '')
    openid = payload.get('FromUserName', '')
    ktvid_and_clientid = payload.get('EventKey', '')
    if event not in ('subscribe', 'SCAN') or not ktvid_and_clientid or not openid:
        return
    if event == 'SCAN':
        # SCAN events carry the key verbatim: "<ktvid>,<clientid>"
        id_pair = [int(part) for part in ktvid_and_clientid.split(',')]
    else:
        # subscribe events carry a "<prefix>_<ktvid>,<clientid>" key
        id_pair = [int(part)
                   for part in ktvid_and_clientid.split('_')[1].split(',')]
    # greet the new follower
    await utils.async_common_api(
        '/wx/custom/send',
        dict(openid=openid, text='欢迎进入收银员等级认证系统', msgtype='text',
             gzh='lsfwh'))
    await self.update_cashier(id_pair[0], id_pair[1], openid)
def get_acl_ont(rest_epr, ont_id):
    """Fetch the user ACL for an ontology from the REST API; None when absent.

    Fix: ``except KeyError, k`` is Python-2-only syntax and bound a name that
    was never used; a plain ``except KeyError`` works on both 2 and 3.
    """
    content = http_request(rest_epr + "virtual/ontology/" + ont_id, dict(), API_KEY_AUTH)
    ont = json.loads(xml2json.xml2json(content))
    try:
        return ont["success"]["data"]["ontologyBean"]["userAcl"]
    except KeyError:
        return None
def post(self):
    """WeChat Pay notification callback: mark the order payed and message the user.

    NOTE(review): the notification's 'sign' field is never verified, and the
    WHERE clause is built by string interpolation from request-supplied values
    (openid / out_trade_no), which is exposed to SQL injection. Both should be
    fixed; behavior is preserved here and only flagged.
    """
    self.set_header("Content-Type", "application/json")
    print(self.request.body)
    json_data = xml2json(self.request.body.encode('utf8'), options)
    data = json.loads(json_data)['xml']
    return_code = data['return_code']
    if return_code == 'SUCCESS':
        openid = data['openid']
        out_trade_no = data['out_trade_no']
        cash_fee = data['cash_fee']
        # FIXME: parameterize instead of interpolating request data into SQL
        where = " openid='%s' and id=%s and (status<>'payed' or status is null) " % (
            openid, out_trade_no)
        from webpy_db import SQLLiteral
        count = pg.update('pay', status='payed', wexin_return=json_data,
                          stat_date=SQLLiteral('NOW()'), where=where)
        if count != 1:
            error_info = 'update failure: count=%s where=%s' % (count, where)
            print(error_info)
            raise Exception(error_info)
        else:
            wechat = wechat_oper.getWechat()
            # fee arrives in cents; convert to yuan for the user-facing message
            content = '''您支付的 %s 元已进入充值系统,正在向您的油卡充值,请耐心等候......''' % (
                int(cash_fee) / 100.00)
            wechat.send_text_message(openid, content)
    else:
        print(data['return_msg'])
    # acknowledge the notification so WeChat stops retrying
    success = '''
<xml>
<return_code><![CDATA[SUCCESS]]></return_code>
<return_msg><![CDATA[OK]]></return_msg>
</xml>
'''
    self.write(success)
def post(self, resource_uri, content):
    """``POST``s a resource by its internal API URI and contents.

    Returns the response parsed from XML into a Python dictionary; raises the
    XeroClient*Exception mapped from the HTTP status code.

    Fix: the bare ``except:`` also swallowed KeyboardInterrupt/SystemExit;
    narrowed to ``except Exception``.
    """
    # Attempt to retrieve the response
    try:
        response_header, response_content = self.request(
            "%s%s" % (self._xero_api_url, resource_uri),
            method="POST",
            body=urllib.urlencode({"xml": content}))
    except Exception:
        raise XeroClientRequestException
    # Check if there is an error:
    if response_header["status"] == "400":
        raise XeroClientBadRequestException(response_content)
    elif response_header["status"] == "404":
        raise XeroClientNotFoundException()
    elif response_header["status"] == "501":
        raise XeroClientNotImplementedException(response_content)
    elif response_header["status"] != "200":
        raise XeroClientUnknownException(response_content)
    # Convert the data into a JSON object:
    json_string = xml2json.xml2json(response_content)
    # Convert the json_string to a Python dictionary and return:
    return json.loads(json_string)
def get_acl_ont(rest_epr, ont_id):
    """Return the ontology's user ACL from the REST API, or None if missing.

    Fix: replaced the Python-2-only ``except KeyError, k`` (unused binding)
    with ``except KeyError``, valid on both Python 2 and 3.
    """
    content = http_request(rest_epr + "virtual/ontology/" + ont_id,
                           dict(), API_KEY_AUTH)
    ont = json.loads(xml2json.xml2json(content))
    try:
        return ont["success"]["data"]["ontologyBean"]["userAcl"]
    except KeyError:
        return None
def __init__(self, xml):
    """Parse a WeChat payment-success callback.

    :param xml: 支付成功回调的XML (XML body of the success notification)
    """
    self.xml = xml
    opts = optparse.Values({"pretty": False})
    # keep the payload under the <xml> root as a dict, plus its signature
    self.xml_json = json.loads(xml2json(self.xml, opts))['xml']
    self.sign = self.xml_json.get('sign', '')
def get_ligands(cfg, item):
    """Fetch ligand info for a PDB structure id; None on any failure.

    Fix: narrowed the bare ``except:`` (which also caught
    KeyboardInterrupt/SystemExit) to ``except Exception``.
    """
    try:
        req = urllib2.Request(
            'http://www.rcsb.org/pdb/rest/ligandInfo?structureId=%s' % item)
        f = urllib2.urlopen(req)
        result = f.read()
        json_string = json.loads(
            xml2json.xml2json(result, optparse.Values({"pretty": False}), 0))
        return json_string
    except Exception:
        return None
def get_devices(soup):
    """Return all the devices in a set as a list of dicts parsed from their XML.

    Fix: the original abused a list comprehension purely for its side effect
    (``[out.append(d) for d in ...]``); ``extend`` expresses the intent.
    """
    out = []
    tracks = [track for track in soup.find('tracks').children if track != '\n']
    for track in tracks:
        devices = [device for device in track.find('devices').children
                   if device != '\n']
        out.extend(json.loads(xml2json(str(device), 0, 1))
                   for device in devices)
    return out
def main():
    """Convert a Maven metadata XML file into a Cohorte latest.json document."""
    p = optparse.OptionParser(
        description='Converts Maven metadata XML file to Cohorte Website latest.json JSON file.',
        prog='latest',
        usage='%prog -o file.json [url]'
    )
    p.add_option('--out', '-o', help="Write to OUT instead of stdout")
    options, arguments = p.parse_args()
    fp = urllib2.urlopen(arguments[0])
    input = fp.read()
    options.pretty = True
    out = xml2json.xml2json(input, options, 1, 1)
    final = {}
    k = arguments[0].rfind("/")
    url_path = arguments[0][:k]  # base URL of the metadata file
    final["snapshots"] = {}
    final["releases"] = {}
    # generate cohorte file
    json_data = json.loads(out)
    artifactId = json_data["metadata"]["artifactId"]
    accepted_extensions = ['zip', 'tar.gz', 'jar']
    for i in json_data["metadata"]["versioning"]["snapshotVersions"]["snapshotVersion"]:
        # bug fix: the original substring test
        # any(i["extension"] in s for s in accepted_extensions)
        # also accepted e.g. 'gz' or 'ar'; exact membership is intended
        if i["extension"] in accepted_extensions:
            # jars carry no classifier; other artifacts are suffixed with it
            suffix = "" if i["extension"] == "jar" else "-" + i["classifier"]
            name = artifactId + suffix
            extension = i["extension"]
            version = i["value"]
            file_name = artifactId + "-" + version + suffix + "." + extension
            if name not in final["snapshots"]:
                final["snapshots"][name] = {"version": version, "files": {}}
            final["snapshots"][name]["files"][extension] = url_path + "/" + file_name
    if options.out:
        with open(options.out, 'w') as out_file:
            out_file.write(json.dumps(final, sort_keys=True, indent=2,
                                      separators=(',', ': ')))
    else:
        # NOTE(review): this prints the raw converted metadata, not 'final';
        # presumably it should print json.dumps(final) -- confirm intent
        print(out)
def xml2mongo(filename, db):
    """Insert XML metadata records from *filename* into MongoDB ('metadata').

    NOTE(review): in the visible source this ``try`` had no matching
    except/finally (a syntax error); a transparent re-raising handler has been
    added so the function is well-formed -- confirm against the original.
    """
    try:
        with open(filename) as fin:
            jout = xml2json.xml2json(fin.read())
        # remove unnecessary enclosing tags, and convert into mongoDB default
        # format: one JSON document per line
        jout = re.subn(r"(?<=\})\,\s+(?=\{\"\_id)", "\n",
                       jout[24:len(jout) - 3])[0]
        for doc in jout.split('\n'):
            db.metadata.insert(json.loads(doc))
        print("Collection 'metadata' is successfully inserted into 'HTRC' database.")
    except Exception:
        raise
def test_default_namespace_attribute(self):
    """Conversion without namespace stripping must yield parseable JSON."""
    strip_ns = 0
    json_string = xml2json.xml2json(xmlstring, options, strip_ns)
    # the namespaced output must at least round-trip through the JSON parser
    json_data = json.loads(json_string)
def download(self):
    """Fetch the source XML (a local copy when DEBUG is set) and parse it to JSON.

    Fix: the DEBUG branch left its file handle open; now context-managed.
    """
    if DEBUG:
        with open(self.name + '.xml') as fh:
            raw = fh.read()
    else:
        self.log.info("%s begins to download xml" % self.name)
        raw = requests.get(self.api).content
        self.log.info("%s download xml finished %d" % (self.name, len(raw)))
    self.json = json.loads(xml2json(raw))
    assert self.json  # sanity: conversion must yield a non-empty document
def test_default_namespace_attribute(self):
    """Without stripping, tags keep their namespace and bare names are absent."""
    strip_ns = 0
    json_string = xml2json.xml2json(xmlstring, options, strip_ns)
    # namespaced tag names survive in the raw JSON text
    self.assertIn("{http://www.w3.org/TR/html4/}table", json_string)
    self.assertIn("{http://www.w3.org/TR/html4/}tr", json_string)
    # attributes are prefixed with '@'
    self.assertIn("@class", json_string)
    # the un-namespaced simple name must not appear as a key
    json_data = json.loads(json_string)
    self.assertNotIn("table", json_data["root"])
def get_go_terms(cfg, item):
    """Fetch GO terms for a PDB structure id; None on any failure.

    Fix: narrowed the bare ``except:`` so KeyboardInterrupt/SystemExit still
    propagate; dead commented-out urllib2 code removed.
    """
    try:
        req = requests.get(
            'http://www.rcsb.org/pdb/rest/goTerms?structureId=%s' % item)
        result = req.content
        json_string = json.loads(
            xml2json.xml2json(result, optparse.Values({"pretty": False}), 0))
        return json_string
    except Exception:
        return None
def convert_xform_to_json(xml_string):
    """Convert an xform XML payload into its JSON form (what xform.form shows).

    Raises couchforms.XMLSyntaxError when the payload is not valid XML.
    """
    try:
        root_name, form = xml2json.xml2json(xml_string)
    except xml2json.XMLSyntaxError as err:
        raise couchforms.XMLSyntaxError(u'Invalid XML: %s' % err)
    form['#type'] = root_name
    return form
def get_ligands(cfg, item):
    """Fetch ligand info for a PDB structure id; log and return None on failure.

    Fix: narrowed the bare ``except:`` to ``except Exception`` while keeping
    the best-effort contract (traceback printed, None returned).
    """
    try:
        req = requests.get(
            'http://www.rcsb.org/pdb/rest/ligandInfo?structureId=%s' % item)
        result = req.content
        return json.loads(
            xml2json.xml2json(result, optparse.Values({"pretty": False})))
    except Exception:
        print(traceback.format_exc())
        return None
def get_settings(self, *args):
    """Query QTM for the named parameter groups (default: all) as a dict.

    Fix: ``if args is ()`` compared identity against a tuple literal --
    implementation-dependent and a SyntaxWarning on modern Pythons; an
    emptiness test is the correct check.
    """
    if not args:
        args = ['all']
    self._send_command('getparameters {}'.format(' '.join(args)))
    xml_text = self._wait_for_reply()
    options = lambda: None  # minimal attribute holder for xml2json options
    options.pretty = False
    json_text = xml2json.xml2json(xml_text, options)
    settings = json.loads(json_text)
    # unwrap the versioned root element
    settings = settings.pop('QTM_Parameters_Ver_' + self.requested_version)
    return settings
def make_set_file(file_list):
    """Parse each XML file in *file_list* into a dict tagged with its filename.

    Fixes: locals ``file`` and ``dict`` shadowed builtins, and the file handle
    was never closed; a context manager now handles it.
    """
    out = []
    for filename in file_list:
        with open(filename, 'r') as fh:
            xml = fh.read()
        try:
            parsed = json.loads(xml2json(xml, 0, 1))
            parsed['@XMLFilename'] = filename
            out.append(parsed)
        except Exception as e:
            print(e)
    return out
def _test(self, filename):
    """Assert that data/<filename>.xml converts to exactly data/<filename>.json."""
    data_dir = os.path.join(os.path.dirname(__file__), 'data')
    with open(os.path.join(data_dir, '{0}.json'.format(filename)),
              encoding='utf-8') as f:
        expected = json.load(f)
    with open(os.path.join(data_dir, '{0}.xml'.format(filename)),
              encoding='utf-8') as f:
        xml_form = f.read()
    root_name, converted = xml2json(xml_form)
    converted['#type'] = root_name
    self.assertEqual(converted, expected)
def getvalue():
    """Run dxdiag to dump hardware info and return it as a dict plus status.

    Fixes: the failure branch referenced the undefined name ``stauts`` (typo
    for ``status``), raising NameError whenever dxdiag failed; the Python-2
    ``file()`` read leaked its handle and is now context-managed.
    """
    shell_command = 'dxdiag /x d:\hard.xml'
    status = subprocess.call(shell_command, shell=True)
    if status != 0:
        value_dic = {'status': status}  # was the typo 'stauts'
    else:
        if os.path.exists('d:\hard.xml'):
            with open('d:\hard.xml') as fh:
                asset_raw_data = fh.read()
            asset_to_json = xml2json.xml2json(asset_raw_data)
            asset_to_dict = json.loads(asset_to_json)
            value_dic = {'asset': asset_to_dict, 'status': status}
        else:
            value_dic = {'status': 1}
    return value_dic
def getvalue():
    """Collect dxdiag hardware info as a dict, keyed with the call status.

    Fixes: undefined name ``stauts`` (typo) in the failure branch raised
    NameError; the Python-2 ``file()`` read leaked its handle.
    """
    shell_command = "dxdiag /x d:\hard.xml"
    status = subprocess.call(shell_command, shell=True)
    if status != 0:
        value_dic = {"status": status}  # was the typo 'stauts'
    else:
        if os.path.exists("d:\hard.xml"):
            with open("d:\hard.xml") as fh:
                asset_raw_data = fh.read()
            asset_to_json = xml2json.xml2json(asset_raw_data)
            asset_to_dict = json.loads(asset_to_json)
            value_dic = {"asset": asset_to_dict, "status": status}
        else:
            value_dic = {"status": 1}
    return value_dic
def test_strip_namespace(self):
    """With strip_ns=1, tags lose their namespace prefix and data survives."""
    strip_ns = 1
    json_string = xml2json.xml2json(xmlstring, options, strip_ns)
    json_data = json.loads(json_string)
    # the namespaced form must no longer appear anywhere in the output
    self.assertFalse("{http://www.w3.org/TR/html4/}table" in json_string)
    # TODO: attributes ("@class") should arguably be kept as well
    # the bare tag name becomes the key, with cell data intact
    self.assertTrue("table" in json_data["root"])
    self.assertEqual(json_data["root"]["table"][0]["tr"]["td"],
                     ["Apples", "Bananas"])
def process_text(self, text, convert_to_json=True):
    """Send *text* to the device, read the reply, and optionally parse it as JSON.

    Returns the (optionally parsed) reply, or None if anything fails.

    Fix: ``parser.parse_args()`` with no argument parsed ``sys.argv`` inside a
    library method, silently consuming the program's own command line; it now
    parses an explicit empty list.
    """
    try:
        self.send_text(text)
        output = self.receive_text()
        output = re.sub('\r', '', output)
        output = re.sub('\n', '', output)
        if convert_to_json:
            parser = OptionParser()
            parser.add_option("-p", "--pretty", default=True)
            (options, args) = parser.parse_args([])  # do not touch sys.argv
            output = xml2json(output, options)
            output = json.loads(output)
        return output
    except Exception as ex:
        LOG.info("An exception occured while processing.\n%s" % ex)
        return None
def main():
    """Stream packets from tcpdump and emit each as JSON via tshark's PDML.

    Reads raw pcap from tcpdump's stdout (non-blocking), rewrites each packet
    record into a temp pcap file, runs tshark over it to get PDML XML, and
    converts that XML to JSON, printing it or appending to `odest`.

    NOTE(review): structure reconstructed from whitespace-mangled source --
    confirm loop boundaries against the original.
    """
    global tfile
    ix = 0
    options = optparse.Values({"pretty": False})
    cmd = ['tcpdump', '-w', '-', '-U']
    for item in dumpcfg:
        cmd.append(item)
    t = subprocess.Popen(cmd, bufsize=0, stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    # switch tcpdump's stdout to non-blocking reads
    fdx = t.stdout.fileno()
    fl = fcntl.fcntl(fdx, fcntl.F_GETFL)
    fcntl.fcntl(fdx, fcntl.F_SETFL, fl | os.O_NONBLOCK)
    while True:
        tpkt = ""
        try:
            tpkt = t.stdout.read()
        except:
            tpkt = ""  # non-blocking read with no data ready
        if tpkt != "":
            if ix != 0:
                # walk pcap record headers; bytes 8-12 hold the captured length
                ai = 0
                while ai < len(tpkt):
                    npkt = tpkt[ai + 8:ai + 12]
                    bi = struct.unpack('I', npkt)[0]
                    pkt = tpkt[ai + 16:bi]
                    ai = ai + bi + 16
                    fr = open(tfile, 'wb')
                    ofr = dpkt.pcap.Writer(fr)
                    ofr.writepkt(pkt)
                    ofr.close()
                fr = open("ex.pcap", 'wb')
                fr.write(tpkt)
                fr.close()
                p = subprocess.Popen(['tshark', '-T', 'pdml', '-r', tfile],
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.PIPE)
                packet = packets_from_stdin(p.stdout)
                jpacket = xml2json.xml2json(packet, options, strip_ns=0)
                if odest == "":
                    print(jpacket)
                else:
                    fr = open(odest, "a")
                    fr.write(jpacket)
                    fr.close()
            else:
                # the first read is the pcap global header -- skip it
                ix = 1
def server_app(environment, start_response):
    """Minimal WSGI app: serve books.xml converted to JSON at '/'.

    Fix: Content-Length was computed from the unserialized conversion result
    while the body sent was its json.dumps() rendering, so the header and body
    lengths disagreed. The body is now serialized first and measured.
    """
    if environment["PATH_INFO"] != "/":
        print("Ignoring: " + environment["PATH_INFO"])
        start_response("404 NOT FOUND", [("content-type", "text/plain")])
        return [":-("]
    with open("books.xml") as f:
        ret = xml2json(f.read())
    body = json.dumps(ret, sort_keys=True, indent=4)
    headers = [
        ("content-type", "application/json"),
        ("content-length", str(len(body)))
    ]
    start_response("200 OK", headers)
    return [body]
def convert(data, args):
    """Convert binary-plist *data* to XML plist text, or to JSON when requested.

    Returns a single-element list with the converted text; when *args* is not
    exactly one element the function falls through and returns None
    (preserved from the original).
    """
    class _Options():
        pass
    opts = _Options()
    opts.pretty = True  # attribute holder matching what xml2json expects
    if len(args) == 1:
        if args[0] in ("json", "JSON"):
            # xml2json() converts newlines and tabs in strings to "\n" & "\t".
            # Stripping them out of XML before conversion to JSON.
            rawxml = writePlistToString(readPlistFromString(data), binary=False)
            rawxml = rawxml.replace("\t", "").replace("\n", "")
            return [xml2json(rawxml, opts)]
        else:
            return [writePlistToString(readPlistFromString(data), binary=False)]
def test_strip_namespace(self):
    """Strip namespaces from the sample XML and POST the JSON to the API.

    NOTE(review): this posts to a hard-coded IP with an embedded token, and
    json.dumps() is applied to text that is already JSON, so the body sent is
    a JSON-encoded *string* rather than an object -- confirm both are intended.
    """
    strip_ns = 1
    json_string = xml2json.xml2json(xmlstring, options, strip_ns)
    json_data = json.loads(json_string)
    # the namespaced form must be gone from the serialized output
    self.assertFalse("{http://www.w3.org/TR/html4/}table" in json_string)
    # TODO: attributes ("@class") should arguably be kept as well
    print(json_string)
    body = json.dumps(json_string)
    headers = {'Content-type': 'application/json'}
    r = requests.post(
        "http://177.72.161.165:8084/fashionmanager-api/api/integration/product?gumgaToken=INTEGRACAO_DATACOPER",
        data=body, headers=headers)
def rdf2json(rdf_path):
    """Parse a Project Gutenberg RDF file into a flat metadata dict.

    Fix: bookshelves were read from the partially-built result dict
    (``data.get('bookshelf')`` -- a key that can never exist there) instead of
    from the ebook record, so they always came back empty.
    """
    rdf_path = Path(rdf_path)
    rdf_json = xml2json(rdf_path.text())
    data = OrderedDict()
    ebook_json = rdf_json['RDF']['ebook']
    # e.g. ".../pg123.rdf" -> id "123"
    data['id'] = rdf_path.split('/')[-1].split('.')[0][2:]
    data['title'] = ebook_json.get('title')
    if not data['title']:
        return data
    if isinstance(data['title'], list):
        data['title'] = data['title'][0]
    data['issued'] = ebook_json['issued']
    data['type'] = get_value(ebook_json['type'])
    data['language'] = get_values(ebook_json['language'])
    data['downloads'] = ebook_json.get('downloads')
    data['subjects'] = get_values(ebook_json.get('subject'))
    # was: get_values(data.get('bookshelf')) -- always None
    data['bookshelves'] = get_values(ebook_json.get('bookshelf'))
    author = ebook_json.get('creator', {})
    if not author:
        return data
    if isinstance(author, list):
        data['author'] = [x.get('agent', {}).get('name') for x in author if x]
    else:
        author_name = author.get('agent', {}).get('name')
        data['author'] = [author_name] if author_name else []
    data['description'] = data.get('description', [])
    # map from attrs to uri
    attrs_to_uri = get_attrs_to_uri(get_uri_to_attrs(rdf_path))
    fmt = ebook_json['hasFormat']
    if isinstance(fmt, list):
        data['files'] = [
            normalize_file_json(obj['file'], attrs_to_uri) for obj in fmt
        ]
    else:
        data['files'] = [normalize_file_json(fmt['file'], attrs_to_uri)]
    return data
def _request(self, url, args=None):
    """GET *url* from the Plex server, re-authenticating once on a 401.

    Returns an ElementTree root for XML responses, the raw body otherwise,
    or False on failure.

    Fixes: the mutable default ``args=dict()`` was mutated, so tokens leaked
    across calls sharing the default; the local ``json`` shadowed the module.
    """
    if args is None:
        args = {}
    if self.token:
        args["X-Plex-Token"] = self.token
    try:
        result = self.session.get("%s%s" % (self.url, url), params=args)
        logger.debug(u"PLEX => requested url: %(url)s" % {"url": url})
        logger.debug(u"PLEX => requests args: %s" % args)
        if (result.status_code == 401 and config.PMS_USER != "username"
                and config.PMS_PASS != "password"):
            logger.debug(u"PLEX => request failed, trying with auth")
            self.session.headers.update(
                {'X-Plex-Client-Identifier': 'plexivity'})
            self.session.headers.update({'Content-Length': 0})
            self.session.auth = (config.PMS_USER, config.PMS_PASS)
            x = self.session.post("https://my.plexapp.com/users/sign_in.xml")
            if x.ok:
                signin = xml2json(x.content, strip_ns=False)  # was 'json'
                self.token = signin["user"]["authentication-token"]
                args["X-Plex-Token"] = self.token
                logger.debug(
                    u"PLEX => auth successfull, requesting url %(url)s again"
                    % {"url": url})
                result = self.session.get("%s%s" % (self.url, url), params=args)
            else:
                return False
        if result and "xml" in result.headers['content-type']:
            import xml.etree.ElementTree as ET
            return ET.fromstring(result.content)
        elif result.ok:
            return result.content
        else:
            logger.error(u"PLEX => there was an error with the request")
            return False
    except requests.ConnectionError:
        logger.error(u"PLEX => could not connect to Server!!!")
        return False
def convertXmlFileToJsonFile(filePath):
    """Convert one XML file to a JSON file under destinationDir.

    Fixes: both file handles were never closed (now context-managed);
    C-style semicolons removed.
    """
    global options
    with open(filePath, 'r') as xml_file:
        json_string = xml2json(xml_file.read(), options, 0, 1)
    # open corresponding json file and write jsonString to it
    json_path = filePath.replace(sourceDir, destinationDir).replace('.xml', '.json')
    with open(json_path, 'w') as json_file:
        json_file.write(json_string)
    # remove old file
    if removeOldXmlFile:
        print("removing file: " + filePath)
        remove(filePath)
def TestResume(self):
    """Submit a base64-encoded resume to RChilli over SOAP and check a reply.

    Fix: the local ``json`` shadowed the json module; renamed.
    """
    with open(self.resumeFilePath, "rb") as resume_file:
        resume_encoded_string = base64.b64encode(resume_file.read())  # read file
    client = Client(self.RChilliApiUrl)  # init soap client
    print(client)
    result = client.service.parseResumeBinary(resume_encoded_string,
                                              self.resumeFilePath,
                                              self.RChilliUserKey,
                                              self.RChilliVersion,
                                              self.RChilliSubuserID)
    result = result.encode('ascii', 'ignore').decode('ascii')
    options = optparse.Values({"pretty": False})
    strip_ns = 1
    json_output = xml2json(result, options, strip_ns)  # was 'json'
    print(json_output)
    self.assertEqual(True, result is not None)
def unzip2mongo(file, db):
    """Extract publication dates from MARC-XML records in a zip, insert into Mongo.

    Records whose 008 control field marks them as English get a date-range
    bucket computed from the first four digits of the raw date.

    Fix: the ZipFile is now context-managed instead of manually closed.
    (The parameter name ``file`` shadows a builtin but is kept for interface
    compatibility.)
    """
    docs = []
    with zipfile.ZipFile(file, 'r') as zf:
        for fn in zf.namelist():
            try:
                js = json.loads(xml2json.xml2json(zf.read(fn)))
                for e in js["collection"]["record"]["controlfield"]:
                    if e["_tag"] == "008":
                        if e["#text"][35:38] == "eng":  # language code bytes 35-37
                            date = e["#text"][7:15].strip()  # raw date, length 8
                            shortdate = date[:4]
                            if shortdate.isdigit():
                                daterange = date2daterange(int(shortdate))
                            else:
                                daterange = 'Invalid'
                            docs.append({"_id": fn[:-4], "raw": date,
                                         "range": daterange})
            except KeyError:
                print('ERROR: Did not find %s in zip file' % fn)
    db.date.insert(docs)
def extract(file):
    """Load a petl table from SQL, CSV, XLSX, JSON, XML, or plain text input.

    NOTE(review): the credential-prompt line was corrupted in the original
    source ('raw_input("Enter Username:"******"Enter pwd:")'); it has been
    reconstructed as two separate prompts -- confirm against the original.
    """
    tb = list()
    if file == "sql":
        host = raw_input("Enter Host:")
        user = raw_input("Enter Username:")
        pwd = raw_input("Enter pwd:")
        dtb = raw_input("Enter Database Name:")
        table = raw_input("Enter Table Name:")
        conn = pymysql.connect(host=host, user=user, password=pwd, db=dtb,
                               charset='utf8mb4',
                               cursorclass=pymysql.cursors.DictCursor)
        temp = etl.fromdb(conn, "SELECT * FROM " + table)
        tb = d2l(temp)
    elif ".csv" in file:
        tb = etl.fromcsv(file)
    elif ".xlsx" in file:
        tb = etl.fromxls(file)
    elif ".json" in file:
        tb = etl.fromjson(file)
        print(tb)
    elif ".xml" in file:
        f = open(file, 'r').read()
        options = optparse.Values({"pretty": True})
        jsn = json.dumps(xml2json.xml2json(f, options))
        ob = json.loads(jsn.decode('string-escape').strip('"'))
        # flatten the two outer levels into a temp JSON file petl can read
        temp = dict()
        for key in ob.keys():
            for skey in ob[key].keys():
                temp = json.dumps(ob[key][skey])
        with open("temp.json", "w") as tmp:
            tmp.write(temp)
        tb = etl.fromjson("temp.json")
        print(tb[0])
    elif ".txt" in file:
        tb = etl.fromtext(file)
        print(tb)
    return tb
def monitor():
    """Dump hardware info via dxdiag and return it as a dict with a status code.

    Fixes: the failure branch referenced the undefined name ``stauts`` (typo
    for ``status``), raising NameError whenever dxdiag failed; the Python-2
    ``file()`` read leaked its handle and is now context-managed.
    """
    shell_command = 'dxdiag /x d:\hard.xml'
    status = subprocess.call(shell_command, shell=True)
    if status != 0:
        value_dic = {'status': status}  # was the typo 'stauts'
    else:
        if os.path.exists('d:\hard.xml'):
            with open('d:\hard.xml') as fh:
                asset_raw_data = fh.read()
            asset_to_json = xml2json.xml2json(asset_raw_data)
            asset_to_dict = json.loads(asset_to_json)
            value_dic = {'asset': asset_to_dict, 'status': status}
        else:
            value_dic = {'status': 1}
    return value_dic
def parseFile(parserXmlPath):
    """Parse an XML file into a JSON-derived object, honoring its declared encoding.

    Reads the first line to pick up an encoding from the <?xml ...?>
    declaration (default utf-8), re-reads the file with that codec while
    dropping the declaration line, then converts XML -> JSON -> Python object.

    Fix: the declaration-skipping test compared a string slice against a *list*
    of characters (``line[0:5] != ['<','?','x','m','l']``), which is always
    true, so the declaration line was never skipped.
    """
    encoding = 'utf-8'
    with open(parserXmlPath, "r") as f:
        line0 = f.readline()
        if line0[0:5] == '<?xml':
            idx = line0.index('encoding=')
            idxQuoteA = idx + len('encoding=')
            quoteChar = line0[idxQuoteA:idxQuoteA + 1]
            idxQuoteZ = line0.index(quoteChar, idxQuoteA + 1)
            encoding = line0[idxQuoteA + 1:idxQuoteZ]
    #
    lines = []
    with codecs.open(parserXmlPath, "r", encoding) as g:
        for line in g:
            if line[0:5] != '<?xml':  # was compared against a list: never matched
                lines.append(line)
    #
    xmlStr = "".join(lines)
    jsonStr = xml2json(xmlStr, myOpt())
    jsonObj = json.loads(jsonStr)
    return jsonObj
def _request(self, url, args=None):
    """Issue a GET against the Plex server, retrying once with auth on a 401.

    Returns a parsed ElementTree for XML bodies, raw content for other OK
    responses, or False on any failure.

    Fixes: the mutable default ``args=dict()`` was mutated in place, leaking
    tokens between calls that relied on the default; the local name ``json``
    shadowed the json module.
    """
    if args is None:
        args = {}
    if self.token:
        args["X-Plex-Token"] = self.token
    try:
        result = self.session.get("%s%s" % (self.url, url), params=args)
        logger.debug(u"PLEX => requested url: %(url)s" % {"url": url})
        logger.debug(u"PLEX => requests args: %s" % args)
        if (result.status_code == 401 and config.PMS_USER != "username"
                and config.PMS_PASS != "password"):
            logger.debug(u"PLEX => request failed, trying with auth")
            self.session.headers.update({'X-Plex-Client-Identifier': 'plexivity'})
            self.session.headers.update({'Content-Length': 0})
            self.session.auth = (config.PMS_USER, config.PMS_PASS)
            x = self.session.post("https://my.plexapp.com/users/sign_in.xml")
            if x.ok:
                signin = xml2json(x.content, strip_ns=False)  # was 'json'
                self.token = signin["user"]["authentication-token"]
                args["X-Plex-Token"] = self.token
                logger.debug(u"PLEX => auth successfull, requesting url %(url)s again" % {"url": url})
                result = self.session.get("%s%s" % (self.url, url), params=args)
            else:
                return False
        if result and "xml" in result.headers['content-type']:
            import xml.etree.ElementTree as ET
            return ET.fromstring(result.content)
        elif result.ok:
            return result.content
        else:
            logger.error(u"PLEX => there was an error with the request")
            return False
    except requests.ConnectionError:
        logger.error(u"PLEX => could not connect to Server!!!")
        return False
def parse2labelx(vocpath, domain, skip_class=''):
    """Convert VOC annotations under *vocpath* into a labelx detect jsonlist.

    Bounding boxes whose class equals *skip_class* are dropped; images left
    with no boxes are omitted entirely.
    """
    vocpath = vocpath.rstrip('/')
    print('start: jsonlist will be saved to ' + basename(vocpath) + '.detect.json')
    annotations_path = join(vocpath, 'Annotations')
    xmllist = [name for name in listdir(annotations_path)
               if isfile(join(annotations_path, name)) and not name[0] == '.']
    jsonlist = []
    for xml_name in xmllist:
        label = xml2json(join(annotations_path, xml_name))
        label['url'] = 'http://%s/%s/%s' % (
            domain, join(basename(vocpath), 'JPEGImages'), label['url'])
        bboxes = label['label']['detect']['general_d']['bbox']
        # collect first, then remove: avoids mutating the list while iterating
        dropped = [item for item in bboxes if item['class'] == skip_class]
        for item in dropped:
            bboxes.remove(item)
        if bboxes:
            jsonlist.append(label)
    if jsonlist:
        payload = '\n'.join(json.dumps(item) for item in jsonlist)
        with open(basename(vocpath) + '.detect.json', 'w+') as f:
            f.write(payload)
    else:
        print('valid annotations empty')
    print('success')
async def post(self):
    """WeChat event callback: provision a KTV finance account and send login info.

    Fix: EventKey was parsed with int() *before* the event/emptiness checks,
    so an empty or malformed key raised ValueError instead of being ignored;
    the checks now run first.
    """
    msg = self.request.body
    msg_dict = json.loads(
        xml2json.xml2json(msg.decode(), optparse.Values({'pretty': True})))
    logging.error(msg_dict)
    msg_dict = msg_dict.get('xml', {})
    event = msg_dict.get('Event', '')
    openid = msg_dict.get('FromUserName', '')
    username_and_ktv_id = msg_dict.get('EventKey', '')
    # validate before parsing: unknown events / empty keys are simply ignored
    if event not in ('subscribe', 'SCAN') or not username_and_ktv_id:
        return
    username, ktv_id = [int(part) for part in username_and_ktv_id.split(',')]
    if not username or not ktv_id:
        return
    user = ctrl.pay.get_ktv_fin_account(username)
    if not user:
        username = int(username)
        password_org = random.randint(99999, 1000000)
        content = "【雷石KTV】欢迎登陆财务管理账号,账户名为:%s,初始密码:%s。" % (username, password_org)
        msg = await ctrl.pay.send_message_ctl(username=username,
                                              password_org=password_org,
                                              ktv_id=ktv_id,
                                              content=content)
        if msg['type'] == 2:
            return
    await utils.async_common_api(
        '/wx/custom/send',
        dict(openid=openid, text='https://erp.ktvsky.com/ktv_fin_curdata',
             msgtype='text', gzh=''))
def xml_collections_endpoint(**lookup):
    """Eve collections endpoint accepting and serving XML payloads.

    POSTed */xml bodies are converted to JSON before insertion; GET responses
    are re-serialized as a <gdxp> document. Non-XML content types use the
    stock collections_endpoint.

    Fix: renamed the ambiguous single-letter local ``l`` (PEP 8 / E741) and
    dropped the dead commented-out code.
    """
    resource = _resource()
    response = None
    method = request_method()
    if request.content_type.endswith("/xml"):
        if method == "POST":
            response = post_internal(resource, payl=xml2json(request.data),
                                     skip_validation=True)
        elif method == "GET":
            response = collections_endpoint(**lookup)
            items = json.loads(response.data.decode('utf-8'))['_items']
            response.data = xmltodict.unparse(
                {'gdxp': {"supplier": list(map(popper, items))}})
        else:
            raise NotImplementedError('Not implemented')
        return send_response(resource, response)
    else:
        return collections_endpoint(**lookup)
def _extract_id_from_raw_xml(xml):
    """Return the meta instance id from raw xform XML, or '' when absent.

    Parse errors are deliberately not handled here: the payload has already
    been run through xml2json once by the time it reaches this helper.
    """
    _, form = xml2json.xml2json(xml)
    return _extract_meta_instance_id(form) or ''
def AutoEditMovie(avNumber):
    """Download a Bilibili video by av number, cut its danmaku highlights, publish.

    NOTE(review): avNumber is interpolated straight into shell commands run via
    os.popen/os.system -- a crafted value allows shell injection; validate it
    or switch to subprocess with an argument list.
    NOTE(review): indentation and the HTML-entity table were reconstructed from
    whitespace-mangled source -- confirm against the original.
    """
    # --- fetch the title, method 1: parse `you-get -i` output ---
    try:
        InfoCommand = ('you-get -i -c ./cookies.txt '
                       'http://www.bilibili.com/video/av' + avNumber + '/')
        InfoData = os.popen(InfoCommand)
        Videoinfo = InfoData.readlines()  # read the command output into a list
        InfoList = []
        for line in Videoinfo:  # line by line
            line = line.strip('\r\n')
            InfoList.append(line)
        TitleOne = (InfoList[1].split(': ', 1))[1]
    except:
        TitleOne = ""
        print("Can't get title1.")
    # --- fetch the title, method 2: bilibilijj API ---
    try:
        InfoUrl = 'http://www.bilibilijj.com/Api/AvToCid/' + avNumber + '/1'
        with urllib.request.urlopen(InfoUrl) as response:
            html = response.read()

        def html_decode(s):
            # unescape the handful of HTML entities the API emits
            htmlCodes = (("'", '&#39;'), ('"', '&quot;'), ('>', '&gt;'),
                         ('<', '&lt;'), ('&', '&amp;'))
            for code in htmlCodes:
                s = s.replace(code[1], code[0])
            return s

        htmldecode = html_decode(html.decode("utf-8"))
        InfoData = json.loads(htmldecode)
        TitleTwo = InfoData['title']
    except:
        TitleTwo = ""
        print("Can't get title2.")
    print(TitleOne, TitleTwo)
    # prefer the API title, fall back to the you-get title
    Title = TitleTwo if TitleTwo != "" else TitleOne
    if Title == "":
        updatedb.UpdateProgress(avNumber, 4, "發生錯誤或影片不存在")
        return
    # --- download the video ---
    try:
        updatedb.UpdateProgress(avNumber, 1, Title)  # 1 = downloading
        DownCommand = ('you-get -o temp -y proxy.uku.im:443 -c ./cookies.txt '
                       'http://www.bilibili.com/video/av' + avNumber + '/')
        DoDownload = os.system(DownCommand)
    except:
        updatedb.UpdateProgress(avNumber, 4, "下載失敗")
        return
    # --- check the download landed (flv or mp4) ---
    if os.path.isfile('./temp/' + Title + '.flv'):
        print("FLV Download Complete!")
        VideoFileName = Title + '.flv'
    if os.path.isfile('./temp/' + Title + '.mp4'):
        print("MP4 Download Complete!")
        VideoFileName = Title + '.mp4'
    # --- analyze danmaku for highlights ---
    try:
        if os.path.isfile('./temp/' + Title + '.cmt.xml'):
            print("Found Danmuku!")
            xml2json.xml2json('./temp/' + Title + '.cmt')
        if os.path.isfile('./temp/' + Title + '.cmt.json'):
            Highlights = countbase.HightLight('./temp/' + Title + '.cmt')
            print("Found HighLight!")
            print(Highlights)
            ResVideoFileName = './temp/' + VideoFileName
            updatedb.UpdateProgress(avNumber, 2, "0")  # 2 = editing
            Highlights = coverdhl.CoverHLS(Highlights)
            print(Highlights)
        else:
            # no danmaku data: nothing to cut, mark the job complete
            updatedb.UpdateProgress(avNumber, 3, "0")  # 3 = done
            return
    except:
        updatedb.UpdateProgress(avNumber, 4, "精彩片段分析出錯")
        return

    def CutVideo(Highlights, VideoFileName, avNumber):
        # cut each highlight span out of the source clip
        HighlightList = []
        for HighlightsTime in Highlights:
            HighlighClip = VideoFileClip(ResVideoFileName).subclip(
                HighlightsTime[0], HighlightsTime[1])
            HighlightList.append(HighlighClip)
        # merge and lower volume x0.8
        Merge_clip = concatenate_videoclips(HighlightList)
        clip = Merge_clip.volumex(0.8)
        # overlay a title card
        txt_clip = TextClip(VideoFileName[:-4] + "\n精彩片段", font="font.otf",
                            fontsize=40, color='white')
        # the title shows for 5 seconds
        txt_clip = txt_clip.set_pos('center').set_duration(5)
        # composite title over the clip and write the final video
        video = CompositeVideoClip([clip, txt_clip])
        video.write_videofile("videos/" + avNumber + ".mp4")

    try:
        CutVideo(Highlights, VideoFileName, avNumber)
        updatedb.UpdateProgress(avNumber, 3, "0")  # 3 = done
        return
    except:
        # drop the last highlight and retry once
        removelast.RemoveLastHL(Highlights)
        print(Highlights)
        try:
            CutVideo(Highlights, VideoFileName, avNumber)
            updatedb.UpdateProgress(avNumber, 3, "0")  # 3 = done
            return
        except:
            updatedb.UpdateProgress(avNumber, 4, "剪輯發生錯誤")
            return
from xml2json import xml2json

# Parse the sample XML document and print its JSON rendering.
c = xml2json("test.xml", encoding="utf-8")
print(c.get_json())
import sys, xml2json if (len(sys.argv) < 3 or sys.argv[1] == '?'): print "Usage: python update_token_db.py <infile.xml> <outfile.json>" exit(1) infname = sys.argv[1] outfname = sys.argv[2] with open(infname, 'r') as inf: with open(outfname, 'w') as outf: data = inf.read().replace('\n', '') jdata = xml2json.xml2json(data) outf.write(jdata)
def main():
    """Build Cohorte's latest.json from Maven repository metadata.

    Command line: --version/-v selects the Cohorte version (a *SNAPSHOT
    suffix selects the dev repository, anything else the release one);
    --out/-o writes the resulting JSON to a file instead of stdout.

    NOTE(review): reconstructed from a whitespace-mangled source; block
    nesting is a best-effort reading -- confirm against the original.
    """
    p = optparse.OptionParser(
        description='Converts Maven metadata XML file to Cohorte Website latest.json JSON file.',
        prog='latest',
        usage='%prog -o file.json [url]'
    )
    p.add_option('--out', '-o', help="Write to OUT instead of stdout")
    p.add_option('--version', '-v', help="Cohorte Version")
    options, arguments = p.parse_args()
    if options.version:
        version = options.version
        # SNAPSHOT builds live in the dev repository, others in release.
        if options.version.endswith("SNAPSHOT"):
            url_path = DEV_REPO_URL + "/" + version
            stage = "dev"
        else:
            url_path = RELEASE_REPO_URL + "/" + version
            stage = "release"
    else:
        print("which cohorte's version? e.g. 1.0.1-SNAPSHOT")
        # NOTE(review): execution falls through with `url_path`/`stage`
        # unbound, so the lines below raise NameError -- this branch
        # probably should exit instead; confirm intent.
    print(url_path)
    if stage == "dev":
        # Dev builds: fetch maven-metadata.xml and convert it to JSON so
        # add_dist() can look artifact paths up in it.
        fp = urllib2.urlopen(url_path + "/" + MAVEN_FILE)
        input = fp.read()
        options.pretty = True
        out = xml2json.xml2json(input, options, 1, 1)
        # generate cohorte file
        json_data = json.loads(out)
    json_final_file = {}

    def add_dist(dist_name):
        # Record version/stage/timestamp/changelog/archive info for one
        # distribution flavour; on any failure fall back to blanks so the
        # output JSON keeps a stable shape.
        json_final_file["cohorte-"+dist_name+"-distribution"] = {}
        if stage == "dev":
            # Dev: resolve the timestamped file names from the metadata.
            version_file_path = get_file_path(url_path, json_data, dist_name, "version")
            changelog_file_path = get_file_path(url_path, json_data, dist_name, "changelog")
            dist_file_path = get_file_path(url_path, json_data, dist_name, "dist")
        else:
            # Release artifacts follow a fixed naming scheme.
            version_file_path = url_path + "/cohorte-" + version + "-" + dist_name + "-distribution-version.js"
            changelog_file_path = url_path + "/cohorte-" + version + "-" + "changelog.txt"
            dist_file_path = url_path + "/cohorte-" + version + "-" + dist_name + "-distribution.tar.gz"
        try:
            version_file_stream = urllib2.urlopen(version_file_path)
            version_file = version_file_stream.read()
            version_json = json.loads(version_file)
            json_final_file["cohorte-"+dist_name+"-distribution"]["version"] = version_json["version"]
            json_final_file["cohorte-"+dist_name+"-distribution"]["stage"] = version_json["stage"]
            json_final_file["cohorte-"+dist_name+"-distribution"]["timestamp"] = version_json["timestamp"]
            json_final_file["cohorte-"+dist_name+"-distribution"]["changelog"] = changelog_file_path
            json_final_file["cohorte-"+dist_name+"-distribution"]["files"] = { "tar.gz" : dist_file_path}
        except:
            json_final_file["cohorte-"+dist_name+"-distribution"]["version"] = ""
            json_final_file["cohorte-"+dist_name+"-distribution"]["stage"] = ""
            json_final_file["cohorte-"+dist_name+"-distribution"]["timestamp"] = ""
            json_final_file["cohorte-"+dist_name+"-distribution"]["changelog"] = ""
            json_final_file["cohorte-"+dist_name+"-distribution"]["files"] = { "tar.gz" : ""}

    add_dist("python")
    add_dist("macosx")
    add_dist("linux")
    add_dist("windows")
    if (options.out):
        file = open(options.out, 'w')
        file.write(json.dumps(json_final_file, sort_keys=True, indent=2, separators=(',', ': ')))
        file.close()
    else:
        # NOTE(review): for the release stage `out` is never assigned, so
        # this print raises NameError -- and it echoes the converted Maven
        # metadata rather than json_final_file; confirm intent.
        print(out)
    def get_pubmed_article(self, item):
        """Fetch one PubMed record by id via Entrez.efetch, convert the XML
        reply to JSON with xml2json, and flatten it into a plain dict.

        Returns a dict with keys 'content', 'authors', 'pmid', 'title',
        'journal', 'journal_issn', 'publication_date', 'publication_year';
        on any unexpected failure the dict instead carries an 'error' key
        holding the formatted traceback.

        NOTE(review): the str/bytes mixing ('\\n' + .encode(...)) indicates
        this code targets Python 2 -- confirm before running under Python 3.
        """
        article = dict()
        try:
            # Fetch the record as XML from the configured Entrez database.
            handle = Entrez.efetch(db=self.__cfg.EntrezDB,
                                   id=item,
                                   rettype="gb",
                                   retmode="xml")
            result = handle.read()
            json_string = json.loads(
                xml2json.xml2json(result, optparse.Values({"pretty": False}),
                                  0))
            #print json.dumps(json_string)
            if 'PubmedArticle' in json_string['PubmedArticleSet'].keys():
                if 'Abstract' in json_string['PubmedArticleSet'][
                        'PubmedArticle']['MedlineCitation'].keys():
                    article_list = json_string['PubmedArticleSet'][
                        'PubmedArticle']['MedlineCitation']['Article'][
                            'Abstract']['AbstractText']
                    # AbstractText may be a list of labelled sections, a
                    # single dict, or a bare string depending on the record.
                    if type(article_list) == list:
                        # Join each labelled section as "Label\nText".
                        formatted_list = [
                            x['@Label'].encode('utf-8', 'ignore') + '\n' +
                            x['#text'].encode('utf-8', 'ignore')
                            for x in article_list
                        ]
                        article['content'] = '\n\n'.join(formatted_list)
                    elif type(article_list) == dict:
                        article['content'] = json_string['PubmedArticleSet'][
                            'PubmedArticle']['MedlineCitation']['Article'][
                                'Abstract']['AbstractText']['#text'].replace(
                                    u'\xa0', u' ').encode('utf-8', 'ignore')
                    else:
                        article['content'] = json_string['PubmedArticleSet'][
                            'PubmedArticle']['MedlineCitation']['Article'][
                                'Abstract']['AbstractText'].replace(
                                    u'\xa0', u' ').encode('utf-8', 'ignore')
                else:
                    article['content'] = 'No abstract available.'
                if 'AuthorList' in json_string['PubmedArticleSet'][
                        'PubmedArticle']['MedlineCitation']['Article'].keys():

                    def return_name(y):
                        # Format one Author entry as "Last, Fore" bytes, or
                        # the collective name when no LastName is present.
                        x = None
                        if isinstance(y, str):
                            x = y
                        else:
                            if 'LastName' in y.keys():
                                x = y['LastName'].encode('utf-8', 'ignore')
                                if 'ForeName' in y.keys():
                                    x = x + ', '.encode(
                                        'utf-8') + y['ForeName'].encode(
                                            'utf-8', 'ignore')
                            else:
                                x = y['CollectiveName'].encode(
                                    'utf-8', 'ignore')
                        return x

                    article['authors'] = [
                        return_name(x).decode('utf-8')
                        for x in json_string['PubmedArticleSet']['PubmedArticle']
                        ['MedlineCitation']['Article']['AuthorList']['Author']
                    ]
                else:
                    article['authors'] = ['No authors informed.']
                article['pmid'] = json_string['PubmedArticleSet'][
                    'PubmedArticle']['MedlineCitation']['PMID'][
                        '#text'].encode('utf-8', 'ignore')
                # ArticleTitle may be a dict with '#text' or a bare string.
                try:
                    article['title'] = json_string['PubmedArticleSet'][
                        'PubmedArticle']['MedlineCitation']['Article'][
                            'ArticleTitle']['#text'].encode('utf-8', 'ignore')
                except:
                    article['title'] = json_string['PubmedArticleSet'][
                        'PubmedArticle']['MedlineCitation']['Article'][
                            'ArticleTitle']
                article['journal'] = json_string['PubmedArticleSet'][
                    'PubmedArticle']['MedlineCitation']['Article']['Journal'][
                        'Title'].encode('utf-8', 'ignore')
                # ISSN is optional on some records.
                try:
                    article['journal_issn'] = json_string['PubmedArticleSet'][
                        'PubmedArticle']['MedlineCitation']['Article'][
                            'Journal']['ISSN']['#text'].encode(
                                'utf-8', 'ignore')
                except:
                    article['journal_issn'] = None
                # Publication date may be missing or given as MedlineDate.
                try:
                    article_journal_PubDate_json = json_string[
                        'PubmedArticleSet']['PubmedArticle'][
                            'MedlineCitation']['Article']['Journal'][
                                'JournalIssue']['PubDate']
                except:
                    article_journal_PubDate_json = None
                if article_journal_PubDate_json:
                    if 'MedlineDate' in article_journal_PubDate_json.keys():
                        # Free-text MedlineDate ranges are not parsed.
                        article['publication_date'] = None
                        article['publication_year'] = None
                    else:
                        dt_year = article_journal_PubDate_json['Year']
                        article['publication_year'] = dt_year
                        # Month and day default to Jan 1 when absent.
                        try:
                            dt_month = article_journal_PubDate_json['Month']
                        except:
                            dt_month = 'Jan'
                        try:
                            dt_day = article_journal_PubDate_json['Day']
                        except:
                            dt_day = '1'
                        # self.__month_cnv maps the month token to an int.
                        article['publication_date'] = datetime.date(
                            int(dt_year), self.__month_cnv[dt_month],
                            int(dt_day))
                else:
                    article['publication_date'] = None
                    article['publication_year'] = None
            else:
                # No PubmedArticle node: fill every field with placeholders.
                article['content'] = 'No abstract available.'
                article['authors'] = 'No authors informed.'
                article['pmid'] = None
                article['title'] = 'No title.'
                article['journal'] = 'No journal.'
                article['journal_issn'] = 'No ISSN.'
                article['publication_date'] = None
                article['publication_year'] = None
        except:
            # Any failure (network, parsing, unexpected schema) is reported
            # back to the caller via the 'error' key.
            article['error'] = traceback.format_exc()
        return article
annotation['iscrowd'] = int(shape['difficult']) # True mean treat the box as background annotation['ignore'] = 0 # int(shape['difficult']) annotation['area'] = bndbox[2] * bndbox[3] return annotation # read json file,return a json object def read_jsonfile(self, path): with open(path, "r", encoding='utf-8') as f: return json.load(f) if __name__ == '__main__': # Convert all xml to json if XML_CONVERT == True: xml2json(labelme_path) # Create file path if not os.path.exists("%scoco/annotations/" % saved_coco_path): os.makedirs("%scoco/annotations/" % saved_coco_path) keyClasses_ = list(classname_final.keys()) if not os.path.exists("%scoco/train2017/" % saved_coco_path): os.makedirs("%scoco/train2017" % saved_coco_path) for label_ in keyClasses_: if not os.path.exists(saved_coco_path + "coco/train2017/" + label_ + '/'): os.makedirs(saved_coco_path + "coco/train2017/" + label_ + '/') if not os.path.exists("%scoco/val2017/" % saved_coco_path): os.makedirs("%scoco/val2017" % saved_coco_path) for label_ in keyClasses_: if not os.path.exists(saved_coco_path + "coco/val2017/" + label_ + '/'):