def perform_request(self, method, resp_format, api_url, payload): """:description: POST or PUT request with json or xml used by preview, place and cancel :param method: PUT or POST method :type method: session, required :param resp_format: Desired Response format, defaults to xml :type resp_format: str, required :param api_url: API URL :type api_url: str, required :param payload: Payload :type payload: str[], required :return: Return request :rtype: xml or json based on ``resp_format`` :EtradeRef: https://apisb.etrade.com/docs/api/order/api-order-v1.html """ LOGGER.debug(api_url) LOGGER.debug("payload: %s", payload) if resp_format == "json": req = method(api_url, json=payload, timeout=self.timeout) else: headers = {"Content-Type": "application/xml"} payload = jxmlease.emit_xml(payload) LOGGER.debug("xml payload: %s", payload) req = method(api_url, data=payload, headers=headers, timeout=self.timeout) LOGGER.debug(req.text) req.raise_for_status() if resp_format == "json": return req.json() if resp_format is None: return jxmlease.parse(req.text) return req.text
def perform_request(self, method, resp_format, api_url, payload): """ run a post or put request with json or xml used by preview, place and cancel """ LOGGER.debug(api_url) LOGGER.debug("payload: %s", payload) if resp_format == "json": req = method(api_url, json=payload, timeout=self.timeout) else: headers = {"Content-Type": "application/xml"} payload = jxmlease.emit_xml(payload) LOGGER.debug("xml payload: %s", payload) req = method(api_url, data=payload, headers=headers, timeout=self.timeout) LOGGER.debug(req.text) req.raise_for_status() if resp_format == "json": return req.json() if resp_format is None: return jxmlease.parse(req.text) return req.text
def _get_xml(self, comments):
    """Format a list of comments as XML."""
    result = ""
    for comment in comments:
        result += "<comment>\n" + jxmlease.emit_xml(comment) + "\n</comment>\n"
    return result
def dataXML(mod_folder):
    # Run the `data` executable of every shNode.* module and merge their XML
    # output into a single document keyed under 'shNode'.
    folders = glob.glob(mod_folder + '/modules/shNode.*')
    data = '{\'shNode\': {'
    for i in folders:
        proc = subprocess.Popen(i + '/data', stdout=subprocess.PIPE)
        output = proc.stdout.read()
        data = data + str(jxmlease.parse(output))
    data = data + '}'
    # Stitch the stringified dicts together into one evaluable literal.
    data = data.replace('{{', '{')
    data = data.replace('}}{', '}, ')
    print(jxmlease.emit_xml(eval(data)))
def post(self):
    parser.add_argument('region', type=str, location='json')
    parser.add_argument('periodType', type=str, location='json')
    parser.add_argument('timeToElapse', type=str, location='json')
    parser.add_argument('reportedCases', type=str, location='json')
    parser.add_argument('population', type=str, location='json')
    parser.add_argument('totalHospitalBeds', type=str, location='json')
    args = parser.parse_args()
    # `region` arrives as a stringified mapping; normalise the quotes and decode it.
    region_data = str(args.region).replace("'", '"')
    args['region'] = json.loads(region_data)
    result = mymodule.estimator(args)
    xml = jxmlease.emit_xml(result)
    print(xml)
    return xml, 200, {'Content-Type': 'text/xml; charset=utf-8'}
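# Client-side sketch for the resource above (hypothetical endpoint path and values;
# only the field names come from the parser): the response body is the XML string
# produced by jxmlease.emit_xml, served with a text/xml content type.
#
#   import requests
#   body = {
#       "region": {"name": "Africa", "avgAge": 19.7},  # re-decoded by the handler via json.loads
#       "periodType": "days", "timeToElapse": "58", "reportedCases": "674",
#       "population": "66622705", "totalHospitalBeds": "1380614",
#   }
#   r = requests.post("http://localhost:5000/api/v1/on-covid-19/xml", json=body)
#   print(r.headers["Content-Type"])  # text/xml; charset=utf-8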
def __init__(self):
    """INIT"""
    self.pprint = lambda x: json.dumps(
        x, indent=4, sort_keys=True, default=str, ensure_ascii=False)
    self.lxml_response_parser = lambda xml_obj: jxmlease.parse_etree(xml_obj)
    self.jxmlease_response_to_dict = lambda xml_obj: json.loads(self.pprint(xml_obj))
    self.lxml_response_to_dict = lambda xml_obj: eval(
        self.pprint(self.lxml_response_parser(xml_obj)))
    self.xml_string_to_dict = lambda xml_str: jxmlease.parse(xml_str)
    self.dict_to_xml_string = lambda xml_dict: jxmlease.emit_xml(xml_dict)
    self.xml_obj_to_string = lambda xml_obj: self.dict_to_xml_string(
        self.lxml_response_parser(xml_obj))
    self.xml_str_to_dict = self.xml_string_to_dict
    self.dict_to_xml_str = self.dict_to_xml_string
    self.xml_obj_to_dict = self.lxml_response_parser
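# Standalone round-trip sketch with jxmlease itself, independent of the helper
# lambdas above: parse an XML string into a dict-like node, then emit it back.
import jxmlease

sample = "<device><name>router1</name><up>true</up></device>"
node = jxmlease.parse(sample)      # XMLDictNode, indexable like a nested dict
print(node["device"]["name"])      # -> router1
print(jxmlease.emit_xml(node))     # back to an XML string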
async def print_message(sid, message):
    # Re-dump the message so we have clean JSON as a str
    message = json.dumps(message)
    print("Incoming: ", message)
    # Parse what comes from the JavaScript client
    obj = json.loads(message)
    folder = "datafolder/"
    if obj["type"] == "1":
        # Generate the JSON file
        f = open(folder + "data.json", "w")
        f.write(json.dumps(obj["noc"]))
        f.close()
        print("JSON file generated!")
    elif obj["type"] == "2":
        # Generate the XML file
        dataXML = jxmlease.emit_xml(obj["noc"])
        f = open(folder + "data.xml", "w")
        f.write(dataXML)
        f.close()
        print("XML file generated!")
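# Message-shape sketch for the handler above (field values are illustrative; only
# the keys `type` and `noc` come from the code): the client sends one object, and
# the handler writes datafolder/data.json for type "1" or datafolder/data.xml
# (via jxmlease.emit_xml) for type "2".
#
#   {"type": "2", "noc": {"shNode": {"hostname": "node-01", "status": "up"}}}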
def getVulnerabilitiesForAsset(self, assetId, assetData):
    vulnerabilityUrl = self.base_url + str(assetId) + "/vulnerabilities"
    vulnerabilityJson = self.getKennaResources(vulnerabilityUrl)
    # Drop per-vulnerability fields that should not appear in the export.
    for x in vulnerabilityJson['vulnerabilities']:
        x.pop('urls')
        x.pop('asset_id')
    # Replace the asset's `urls` entry with the vulnerability details.
    assetData['vulnerability_info'] = assetData.pop('urls')
    assetData['vulnerability_info'] = vulnerabilityJson
    self.tempAsset['asset'] = assetData
    # Round-trip through a JSON file on the mount dir, then emit the dict as XML.
    temp_file_path = os.path.join(self.mount_dir, "data_file.json")
    with open(temp_file_path, "w") as write_file:
        json.dump(self.tempAsset, write_file)
    with open(temp_file_path) as json_file:
        dict_data = json.load(json_file)
        data_xml = jxmlease.emit_xml(dict_data)
        self.writeToFile(data_xml, assetId)
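# Design note (sketch, not from the original code): the JSON temp-file round trip
# above only serialises and re-reads the same dict; jxmlease.emit_xml accepts the
# in-memory structure directly, so a shorter path would be:
#
#   data_xml = jxmlease.emit_xml(self.tempAsset)
#   self.writeToFile(data_xml, assetId)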
def generate_live_feeds(self, feed):
    values = {}
    values['feed'] = feed
    prod_temp_obj = self.env['product.template']
    values['base_url'] = self.env['ir.config_parameter'].sudo().get_param('web.base.url')
    values['prod_data_feeds'] = []
    if feed and feed.product_ids and feed.feed_selection == 'product':
        values['prod_data_feeds'] = prod_temp_obj.sudo().search([
            ('sale_ok', '=', True), ('id', 'in', feed.product_ids.ids)])
    if feed and feed.product_category_ids and feed.feed_selection == 'category':
        values['prod_data_feeds'] = prod_temp_obj.sudo().search([
            ('sale_ok', '=', True), ('categ_id', 'in', feed.product_category_ids.ids)])
    if feed and feed.product_public_categ_ids and feed.feed_selection == 'web_category':
        values['prod_data_feeds'] = prod_temp_obj.sudo().search([
            ('sale_ok', '=', True), ('public_categ_ids', 'in', feed.product_public_categ_ids.ids)])
    dlist = []
    full_final_prod_dict = {}
    collections_field_with_type = []
    if not values.get('base_url'):
        values['base_url'] = 'http://127.0.0.1:8069'
    # Map every product.template field name to its field type.
    # for field_name, field in prod_temp_obj._fields.iteritems():
    for field_name, field in prod_temp_obj._fields.items():
        product_data = {}
        field_type = prod_temp_obj._fields.get(field_name)
        product_data[field_name] = field_type.type
        collections_field_with_type.append(product_data)
    default_lang = ''
    if feed.feed_lang_id:
        default_lang = feed.feed_lang_id.code
    else:
        default_lang = 'en_US'
    assign_parent = feed.assign_parent_tag
    for product in values['prod_data_feeds']:
        d = OrderedDict()
        start_main_tag = ""
        global list_of_main_tags
        global dynamic_val1
        list_of_main_tags = []
        first_line = feed.product_pattern.split('\n')[0]
        # used to make the parent contain only words like "<item>"
        if re.search('>(.*)', first_line) and re.search('>(.*)', first_line).group(1) == '':
            assign_parent = re.search('<(.*)>', first_line).group(1)
        for final in feed.product_pattern.split('\n'):
            if final:
                starting = ending = tag_value = None
                starting_sqr = tag_value_sqr = tag_value_replica = None
                if not re.search('<(.*)>\(', final):
                    if re.search('<(.*)>{', final):
                        starting = re.search('<(.*)>{', final).group(1)
                    if re.search('{(.*)}', final):
                        tag_value = re.search('{(.*)}', final).group(1)
                    if tag_value and re.search('(.*)}\[', final):
                        tag_value_replica = re.search('\[(.*)\]', final).group(1)
                # used to define a static website with dynamic data like ([static]{dynamic})
                # or ({dynamic}[static])
                dynamic_link_tag = dynamic_val2 = None
                dynamic_val1 = {}
                if re.search('<(.*)>\(', final):
                    starting = re.search('<(.*)>\(', final).group(1)
                    # find the sequence for placing static data after or before the actual value
                    # --------------start finding sequence------------------------
                    if re.search('\(\[(.*)\]', final):
                        dynamic_val1[0] = re.search('\[(.*)\]', final).group(1)
                    if re.search('\[(.*)\]\)', final):
                        dynamic_val1[1] = re.search('\[(.*)\]', final).group(1)
                    if re.search('{(.*)}', final):
                        tag_value = re.search('{(.*)}', final).group(1)
                    # --------------end finding sequence------------------------
                if re.search('<(.*)>\[', final):
                    starting_sqr = re.search('<(.*)>\[', final).group(1)
                    if re.search('\[(.*)\]', final):
                        tag_value_sqr = re.search('\[(.*)\]', final).group(1)
                # a main tag becomes the parent, and all data tags after it are
                # considered its children until the closing main tag is found
                main_tag_data = re.search('<(\w+):(\w+)>', final)
                if main_tag_data:
                    rest_data = re.search('>(.*)', final)
                    main_tag_data = re.search('<(.*)>', final).group(1)
                    if rest_data and rest_data.group(1) == '' and main_tag_data != assign_parent:
                        start_main_tag = main_tag_data
                        list_of_main_tags.append(str(start_main_tag))
                    if re.search('<(.*)>', final) and start_main_tag:
                        if re.search('<(.*)>', final) and re.search('<(.*)>', final).group(1)[0] == '/':
                            start_main_tag = ''
                main_tag_data = re.search('<(\w+)>', final)
                if main_tag_data:
                    rest_data = re.search('>(.*)', final)
                    main_tag_data = re.search('<(.*)>', final).group(1)
                    if rest_data and rest_data.group(1) == '' and main_tag_data != assign_parent:
                        start_main_tag = main_tag_data
                        list_of_main_tags.append(str(start_main_tag))
                    if re.search('<(.*)>', final) and start_main_tag:
                        if re.search('<(.*)>', final) and re.search('<(.*)>', final).group(1)[0] == '/':
                            start_main_tag = ''
                my_domain = False
                tag_name = False
                if tag_value and tag_value.find('.') != -1:
                    val = tag_value.split('.')
                    my_domain = val[0]
                    tag_value = val[1]
                if tag_value and tag_value.find(':') != -1:
                    val = tag_value.split(':')
                    tag_name = val[0]
                    tag_value = val[1]
                if not starting and starting_sqr and tag_value_sqr:
                    self.store_data(main_dict=d, tag=starting_sqr, parent_tag=start_main_tag, val=tag_value_sqr)
                ##########################################################################
                tag_val_list = []
                global re_list
                re_list = None
                if re.search('<(.*)>\(', final):
                    re_list = re.findall(r"(\[([^][]+)\]|\{([\w+.:]+)\})", str(final))
                if re_list:
                    for rec in re_list:
                        if rec[2] != '':
                            fin_val = rec[2].split('.')[-1:][0] if rec[2].find('.') else rec[2]
                            fin_val = rec[2].split(':')[-1:][0] if rec[2].find(':') else rec[2]
                            tag_val_list.append(fin_val)
                if starting and tag_value and not starting_sqr:
                    if not tag_val_list:
                        tag_val_list.append(tag_value)
                    lsd = []
                    for tag_value in tag_val_list:
                        val = ''
                        if tag_value and tag_value.find('.') != -1:
                            val = tag_value.split('.')
                            my_domain = val[0]
                            tag_value = val[1]
                        if tag_name != 'variants':
                            field_val = None
                            # used when the product field does not exist
                            if hasattr(product, tag_value):
                                field_val = product.read([tag_value])[0]
                            else:
                                val = tag_value_replica if tag_value_replica else ''
                            for field_and_type in collections_field_with_type:
                                if tag_value == list(field_and_type.keys())[0]:
                                    if field_and_type.get(tag_value) in ['char', 'text', 'float', 'integer', 'selection', 'boolean', 'date', 'datetime', 'html']:
                                        if not my_domain:
                                            if tag_value in ('lst_price', 'list_price', 'standard_price'):
                                                final_price = []
                                                for prod in self.env['product.product'].sudo().search([('product_tmpl_id', '=', product.id)]):
                                                    final_price.append(self.my_compute_price(prod, feed.pricelist_id))
                                                val = str(json.dumps(list(final_price))).strip('[]')
                                            if tag_value not in ('lst_price', 'list_price', 'standard_price'):
                                                if field_val.get(tag_value):
                                                    # import sys
                                                    # reload(sys)
                                                    # sys.setdefaultencoding('utf-8')
                                                    convert_string = str(field_val.get(tag_value))
                                                    self.env.cr.execute(
                                                        'select value from ir_translation where lang=%s and type=%s and src like %s;',
                                                        (default_lang, 'model', (convert_string or u'').strip()))  # .encode('utf-8')
                                                    res_trans = self.env.cr.fetchone()
                                                    if field_and_type.get(tag_value) in ['html']:
                                                        if res_trans and res_trans[0]:
                                                            element = re.sub('<[A-Za-z\/][^>]*>', '', res_trans[0])
                                                            val = element if element else ''
                                                        else:
                                                            res = field_val.get(tag_value)
                                                            element = re.sub('<[A-Za-z\/][^>]*>', '', res)
                                                            val = element if element else ''
                                                    else:
                                                        if res_trans:
                                                            val = res_trans[0] if res_trans[0] else ''
                                                        else:
                                                            val = field_val.get(tag_value) if field_val.get(tag_value) else ''
                                                else:
                                                    replica_val = tag_value_replica if tag_value_replica else field_val.get(tag_value)
                                                    val = replica_val
                                        if my_domain:
                                            if field_val.get(tag_value):
                                                if values['base_url'][-1:] != '/' and field_val.get(tag_value)[0] != '/':
                                                    base_val = str(values['base_url']) + '/' + str(field_val.get(tag_value))
                                                    val = base_val
                                                else:
                                                    base_val = values['base_url'] + field_val.get(tag_value)
                                                    val = base_val
                                            else:
                                                base_val = values['base_url'] + field_val.get(tag_value)
                                                val = base_val
                                    if field_and_type.get(tag_value) == 'binary':
                                        product_image = self.my_image_url(product, 'image_1920')
                                        if not my_domain:
                                            val = product_image
                                        if my_domain:
                                            val = values['base_url'] + product_image
                                    if field_and_type.get(tag_value) == 'many2one':
                                        if field_val.get(tag_value) and len(field_val.get(tag_value)) >= 2:
                                            if '/' in field_val.get(tag_value)[1]:
                                                res_trans_pack = []
                                                for trans in field_val.get(tag_value)[1].split('/'):
                                                    self.env.cr.execute(
                                                        'select value from ir_translation where lang=%s and src like %s;',
                                                        (default_lang, str(trans.strip())))  # .encode('utf-8')
                                                    dataa = self.env.cr.fetchone()
                                                    if dataa:
                                                        res_trans_pack.append(dataa)
                                                if res_trans_pack:
                                                    val = ' / '.join(t[0] for t in res_trans_pack if t)
                                                else:
                                                    val = field_val.get(tag_value)[1]
                                            else:
                                                self.env.cr.execute(
                                                    'select value from ir_translation where lang=%s and type=%s and src like %s;',
                                                    (default_lang, 'model', str(field_val.get(tag_value)[1])))  # .encode('utf-8')
                                                res_trans = self.env.cr.fetchone()
                                                if res_trans:
                                                    val = res_trans[0]
                                                else:
                                                    if field_val.get(tag_value):
                                                        val = field_val.get(tag_value)[1]
                                                    else:
                                                        val = ''
                                        else:
                                            if tag_value_replica:
                                                val = tag_value_replica
                                            else:
                                                val = ''
                                    if field_and_type.get(tag_value) in ['one2many', 'many2many']:
                                        fields_obj = self.env['ir.model.fields']
                                        field_value = fields_obj.sudo().search([('model', '=', 'product.template'), ('name', '=', tag_value)])
                                        records = self.env[field_value.relation].sudo().search([('id', 'in', field_val.get(tag_value))])
                                        data = []
                                        for rec in records:
                                            if rec._rec_name and rec._rec_name == 'name':
                                                if rec.name:
                                                    # `unicode` in the original is a Python 2 leftover; use str on Python 3
                                                    if isinstance(rec.name, str):
                                                        self.env.cr.execute(
                                                            'select value from ir_translation where lang=%s and type=%s and src like %s;',
                                                            (default_lang, 'model', str(rec.name)))  # .encode('utf-8')
                                                        res_trans = self.env.cr.fetchone()
                                                        if type(res_trans) is tuple:
                                                            res_trans = res_trans[0]
                                                        if res_trans:
                                                            data.append(res_trans)
                                                        else:
                                                            data.append(rec.name)
                                                    else:
                                                        self.env.cr.execute(
                                                            'select value from ir_translation where lang=%s and type=%s and src like %s;',
                                                            (default_lang, 'model', str(rec.name.name)))  # .encode('utf-8')
                                                        res_trans = self.env.cr.fetchone()
                                                        if res_trans:
                                                            data.append(res_trans)
                                                        else:
                                                            data.append(rec.name.name)
                                        val = ', '.join(set(data))
                                        if not data:
                                            if tag_value_replica:
                                                val = tag_value_replica
                        if tag_name == 'variants':
                            if len(product.product_variant_ids) > 1:
                                variantss = []
                                for variant in product.product_variant_ids:
                                    for var in variant.attribute_line_ids.search([('product_tmpl_id', '=', product.id), ('attribute_id.name', 'ilike', _(tag_value))]):
                                        if var.value_ids:
                                            for v in var.value_ids:
                                                self.env.cr.execute(
                                                    'select value from ir_translation where lang=%s and type=%s and src like %s;',
                                                    (default_lang, 'model', str(v.name)))  # .encode('utf-8')
                                                res_trans = self.env.cr.fetchone()
                                                if res_trans:
                                                    variantss.append(res_trans[0])
                                                else:
                                                    variantss.append(v.name)
                                val = ', '.join(set(variantss))
                            elif tag_value_replica:
                                val = tag_value_replica
                        lsd.append(val if val else '')
                    self.store_data(main_dict=d, tag=starting, parent_tag=start_main_tag, val=lsd)
                else:
                    # used to make an empty tag visible in the xml, e.g. <g:id></g:id>
                    check_empty_tag = re.search('<(.*)><(.*)', str(final).replace(' ', ''))
                    if check_empty_tag and check_empty_tag.group(1) and check_empty_tag.group(2)[0] == '/':
                        empty_val_tag = check_empty_tag.group(1)
                        self.store_data(main_dict=d, tag=empty_val_tag, parent_tag=start_main_tag, val='')
        dlist.append(d)
    if all(d for d in dlist):
        if feed.sudo().extension == 'csv':
            keys, values_data = [], []
            data = ''
            for datalist in dlist:
                for key, value in dict(datalist).items():
                    if key not in keys:
                        keys.append(str(key))
                    values_data.append(value)
            i = 0
            data = str(keys)[1:-1]
            data = data + "\n"
            for val in values_data:
                if i % len(keys) == 0 and i != 0:
                    data = data[:-1] + '\n'
                for v in val:
                    v = str(v).replace(",", "|")
                    data = data + v + ','
                i = i + 1
            feed.sudo().final_product_pattern = data[:-1].replace("'", "")
        elif feed.sudo().extension == 'txt':
            keys, values_data = [], []
            data = ''
            for datalist in dlist:
                for key, value in dict(datalist).items():
                    if key not in keys:
                        keys.append(str(key))
                    values_data.append(value)
            i = 0
            data_title = ""
            for j in keys:
                data_title = data_title + j + "\t"
            data = data_title
            data = data + "\n"
            for val in values_data:
                if i % len(keys) == 0 and i != 0:
                    data = data[:-1] + '\n'
                for v in val:
                    v = str(v).replace(",", "|")
                    data = data + v + "\t"
                i = i + 1
            feed.sudo().final_product_pattern = data[:-1].replace("'", "")
        else:
            full_final_prod_dict.update({assign_parent: dlist})
            full_final_prod = jxmlease.emit_xml(full_final_prod_dict)
            new_full_final_prod = full_final_prod.replace('<?xml version="1.0" encoding="utf-8"?>', '')
            feed.sudo().final_product_pattern = new_full_final_prod
    return values
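# Minimal sketch of the final emit step above (tag names are illustrative):
# jxmlease.emit_xml renders a list value as repeated child elements, which is how
# each per-product dict in `dlist` becomes one entry of the generated feed.
import jxmlease

feed_dict = {"catalog": {"item": [
    {"id": "1", "title": "Desk"},
    {"id": "2", "title": "Chair"},
]}}
print(jxmlease.emit_xml(feed_dict))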
def XmlResponse(data, *args, **kwargs):
    return HttpResponse(emit_xml(data), *args, content_type="text/xml", **kwargs)
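# Usage sketch in a Django view (view name and payload are hypothetical); XmlResponse
# assumes `from django.http import HttpResponse` and `from jxmlease import emit_xml`.
#
#   def order_status(request):
#       return XmlResponse({"order": {"id": "42", "state": "shipped"}})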