def verify(self, manager, uri, response, respdata, args): #@UnusedVariable
    """Compare an HTTP response body against the contents of a reference file.

    Returns a (matched, message) pair: (True, "") on success, otherwise
    (False, reason).  Comparison is tried verbatim first, then with LF->CRLF
    normalization, then with format-specific unwrapping for .ics (iCalendar
    line unfolding) or .xml (canonicalization via re-serialization) files.
    """
    # Get arguments: "filepath" must name exactly one reference file.
    files = args.get("filepath", [])

    # status code must be 200, 207
    if response.status not in (200, 207):
        return False, " HTTP Status Code Wrong: %d" % (response.status,)

    # look for response data
    if not respdata:
        return False, " No response body"

    # look for one file
    if len(files) != 1:
        return False, " No file to compare response to"

    # read in all data from specified file; any read failure maps to data=None
    fd = open(files[0], "r")
    try:
        try:
            data = fd.read()
        finally:
            fd.close()
    except:
        data = None

    if data is None:
        return False, " Could not read data file"

    # apply server-specific variable substitutions to the reference data
    data = manager.server_info.subs(data)

    result = True
    if data != respdata:
        # retry with network-style CRLF line endings
        data = data.replace("\n", "\r\n")
        if data != respdata:
            # If we have an iCalendar file, then unwrap data and do compare
            if files[0].endswith(".ics"):
                # unfold iCalendar continuation lines (CRLF + space) on both sides
                data = data.replace("\r\n ", "")
                respdata = respdata.replace("\r\n ", "")
                if data != respdata:
                    result = False
            elif files[0].endswith(".xml"):
                # canonicalize both sides by parsing and re-serializing
                try:
                    respdata = tostring(ElementTree(file=StringIO(respdata)).getroot())
                except Exception:
                    return False, " Could not parse XML response: %s" % (respdata,)
                try:
                    data = tostring(ElementTree(file=StringIO(data)).getroot())
                except Exception:
                    return False, " Could not parse XML data: %s" % (data,)
                if data != respdata:
                    result = False
            else:
                result = False

    if result:
        return True, ""
    else:
        # include a unified diff of reference vs. response in the failure message
        error_diff = "\n".join([line for line in unified_diff(data.split("\n"), respdata.split("\n"))])
        return False, " Response data does not exactly match file data %s" % (error_diff,)
def __init__(self, inst_el):
    """
    inst_el is an ElementTree element representing a movie instance,
    extracted from train.xml or testcases.xml

    Child elements are dispatched on their tag and stored as instance
    attributes via self.__dict__.  Any per-child failure prints the
    offending element and aborts the whole process.
    """
    self.id = inst_el.attrib['id']
    for child_el in inst_el:
        try:
            if child_el.tag == "regy":
                # opening week revenue
                self.target = float(child_el.attrib['yvalue'])
            elif child_el.tag == "text":
                # reviews: attribute name comes from the element's tlabel
                self.__dict__[child_el.attrib['tlabel']] = asciify(child_el.text)
            elif child_el.tag.endswith('release'):
                # special weekend releases: text "false" -> False, anything else -> True
                self.__dict__[child_el.tag] = False if child_el.text.strip() == "false" else True
            elif child_el.tag in self.implicit_list_atts:
                # these can appear multiple times w/ different vals
                if hasattr(self, child_el.tag):
                    self.__dict__[child_el.tag].append(asciify(child_el.text))
                else:
                    self.__dict__[child_el.tag] = [asciify(child_el.text)]
            elif len(child_el) > 0:
                # list (e.g., actors, genres): collect grandchild text values
                self.__dict__[child_el.tag] = [asciify(el.text) if el.text is not None else "" for el in child_el]
            elif len(child_el.attrib) == 0 and child_el.text is None:
                # just a predicate: bare empty tag means True
                self.__dict__[child_el.tag] = True
            elif len(child_el.attrib) == 0 and (child_el.tag.startswith('num') or child_el.tag in self.numeric_fields):
                # numeric field: strip thousands separators and dollar signs
                self.__dict__[child_el.tag] = float(child_el.text.replace(",","").replace("$",""))
            elif len(child_el.attrib) == 0:
                # plain text field
                self.__dict__[child_el.tag] = asciify(child_el.text)
        except Exception:
            # dump the element that failed to parse, then bail out hard
            print ET.tostring(child_el)
            import sys
            sys.exit(1)
def json_to_elem(jsonobj):
    '''Print one XML element per key of *jsonobj*.

    For each key, a nested dict value is interpreted attribute-style:
    sub-keys starting with "@" become XML attributes (without the "@"),
    and a "#text" sub-key becomes the element text.  Any other value is
    used directly as the element text.

    Example (one element printed per key)::

        o = '{"tag1": "1", "tag2": "2"}'
        json_to_elem(json.loads(o))
        # <tag1>1</tag1>
        # <tag2>2</tag2>

    Fixes over the previous version: the element was built only once,
    after the loop, from whatever key happened to come last, a duplicate
    ``isinstance(value, dict)`` branch was unreachable, and ``tail`` could
    be referenced while unbound.
    '''
    for key, value in jsonobj.items():
        attrs = {}
        text = ''
        if isinstance(value, dict):
            for k, v in value.items():
                if k[:1] == "@":
                    attrs[k[1:]] = v
                elif k == "#text":
                    text = v
        else:
            text = value
        e = ET.Element(key, attrs)
        e.text = text
        print(ET.tostring(e).decode())
def write_xml(tree, filename=None):
    """Serialize *tree* with ET.tostring and write it to *filename*, or print it.

    :param tree: ElementTree element to serialize.
    :param filename: target path; when None the XML is printed to stdout.

    Fixes: the removed-in-py3 ``file()`` builtin is replaced by ``open()``
    inside a ``with`` block (no leaked handle on write failure), and the file
    is opened in binary mode because ``ET.tostring`` yields bytes on Python 3.
    """
    if filename:
        with open(filename, "wb") as f:
            f.write(ET.tostring(tree))
    else:
        print(ET.tostring(tree))
def test_merge_1(self):
    """merge() of xml_file5 must serialize identically to xml_file6.

    Fixes: ``assert_`` is a deprecated alias, and comparing with a boolean
    assertion hides the actual values on failure — ``assertEqual`` reports
    a useful diff of the two serializations.
    """
    merged = merge(et.fromstring(xml_file5))
    expected = et.tostring(et.fromstring(xml_file6))
    self.assertEqual(et.tostring(merged), expected)
def get_info(self, request):
    """Look up EAD descriptive metadata for the path given in the request form.

    Tries three XPath shapes against the package's EAD.xml — a file inside a
    <daoset>, a file referenced directly by <dao>, then a directory matched by
    <c base=...> — and returns the first matching <did> (or its parent) as a
    JSON string.  Falls back to a 404-style JSON error payload.
    """
    path = self.translate_path(request.form['path'])
    # split at '/representations': parts[0] is the package root,
    # parts[1]+parts[2] is the intra-package href
    parts = path.partition('/representations')
    ip = parts[0]
    hrefs = self._get_href_variations(parts[1] + parts[2])
    namespace = '{http://ead3.archivists.org/schema/}'
    tree = ET.parse('%s/metadata/descriptive/EAD.xml' % ip)
    # regular file - daoset: dao nested one level below did
    for href in hrefs:
        did_list = tree.findall(".//%sdid/*/%sdao[@href='%s']/../.." % (namespace, namespace, href))
        if did_list:
            o = xmltodict.parse(ET.tostring(did_list[0]))
            return json.dumps(o)
    # regular file - no daoset: dao directly below did
    for href in hrefs:
        did_list = tree.findall(".//%sdid/%sdao[@href='%s']/.." % (namespace, namespace, href))
        if did_list:
            o = xmltodict.parse(ET.tostring(did_list[0]))
            return json.dumps(o)
    # directory: component <c> whose @base matches the href
    for href in hrefs:
        did_list = tree.findall(".//%sc[@base='%s']/%sdid" % (namespace, href, namespace))
        if did_list:
            o = xmltodict.parse(ET.tostring(did_list[0]))
            return json.dumps(o)
    # fallback: nothing matched
    return flask.jsonify(
        error=404,
        error_text='Not Found',
        info='No metadata associated to this element'
    )
def main():
    """Translate assignment statements from input_data.txt into XML elements.

    Each input line is expected to be whitespace-separated with the target
    variable name last; the remaining components (after dropping one token,
    presumably the '=' sign — TODO confirm against the input format) are
    interpreted as a literal, unary, or binary operation.  Each resulting
    element is printed as it is built.
    """
    with open("input_data.txt") as f:
        i = 0
        statements = []
        # element 0 is a synthetic "program" header with result "a"
        statement = ET.Element("program")
        statements.append(statement)
        ET.SubElement(statement, "result").text = "a"
        for line in f:
            statement_components = (line.strip()).split(" ")
            # last token names the statement element
            statement = ET.Element(statement_components.pop())
            statements.append(statement)
            i += 1
            # drop the next token (presumably '=' — TODO confirm)
            statement_components.pop()
            elements = len(statement_components)
            if elements == 1:
                # a simple assignment
                statement.text = statement_components.pop()
            elif elements == 2:
                # unary operation and assignment; operator name mapped via `operators`
                operand = statement_components.pop()
                operator = ET.SubElement(statement, operators[statement_components.pop()])
                operator.text = operand
            elif elements == 3:
                # binary operation and assignment: "(left, right)" argument list
                operand = statement_components.pop()
                operator = ET.SubElement(statement, operators[statement_components.pop()])
                operator.text = "(" + statement_components.pop() + ", " + operand + ")"
            print ET.tostring(statements[i])
def searchAllServers(self, query):
    """Query every owned and shared server and merge the search results.

    NOTE(review): this method looks broken as written — see inline notes.
    Intent appears to be: take the first MediaContainer result as the base
    document and append the containers from every further server into it.
    """
    xml_combined = None
    for server in self.servers:
        root = ET.fromstring(server.getSearchXML(query))
        # NOTE(review): ElementTree (the tree wrapper) has no getchildren();
        # this likely raises AttributeError — root.getchildren()/list(root)
        # was probably intended.  Confirm before relying on this path.
        data = ET.ElementTree(root).getchildren()
        # NOTE(review): if `data` really were a list it would have no .iter()
        for cont in data.iter('MediaContainer'):
            if xml_combined is None:
                xml_combined = data
                #insertion_point = xml_combined.findall("./MediaContainer")[0]
                insertion_point = data
            else:
                # NOTE(review): insertion_point is only bound on the branch
                # above — unbound here if the first match was not processed
                insertion_point.append(cont)
    for server in self.sharedServers:
        data = ET.ElementTree(ET.fromstring(server.getSearchXML(query)))
        for cont in data.iter('MediaContainer'):
            if xml_combined is None:
                xml_combined = data
                #insertion_point = xml_combined.findall("./MediaContainer")[0]
                insertion_point = data
            else:
                insertion_point.append(cont)
    dprint(__name__, 0, "test: {0}", ET.tostring(xml_combined))
    return ET.tostring(xml_combined)
def Serialize(root, uri_prefixes=None, pretty_print=True):
    """Serialize *root* to an XML string without mutating the caller's tree.

    The element is deep-copied via a tostring/fromstring round-trip so that
    prefix rewriting and indentation only touch the scratch copy.
    """
    scratch = ElementTree.fromstring(ElementTree.tostring(root))
    prefixes = uri_prefixes or {}
    SetPrefixes(scratch, prefixes)
    if pretty_print:
        Indent(scratch)
    return ElementTree.tostring(scratch)
def check_text_values(filename, cases_to_check=None):
    """Classify tag values of selected address keys in an OSM XML file.

    For every <node>/<way> tag whose key is in *cases_to_check* (default:
    addr:city, addr:housename, addr:street), the value is bucketed into
    all_capital / all_small / unicode_text / other and the resulting sets
    are pretty-printed.

    Fixes: the mutable-list default argument is replaced with the None
    sentinel idiom, and the Python-2-only print statement is parenthesized.
    """
    if cases_to_check is None:
        cases_to_check = ['addr:city', 'addr:housename', 'addr:street']
    categories = {
        'all_capital': re.compile(r'^([A-Z]| )+$'),
        'all_small': re.compile(r'^([a-z]| )+$'),
        'unicode_text': re.compile(r'[0-9]+'),
        'other': re.compile(r'\S')
    }
    keys = categories.keys()
    categories_set = {j: {k: set() for k in keys} for j in cases_to_check}
    for event, elem in ET.iterparse(filename):
        if elem.tag in ['node', 'way']:
            for tag in elem.findall('tag'):
                k = tag.attrib['k']
                v = tag.attrib['v']
                # one-off debugging probe for a specific problematic value
                if k == 'addr:housename' and v == u'Calle Santa Mar\xeda n\xba8, 48005 Bilbao':
                    print(ET.tostring(elem))
                if k in cases_to_check:
                    if categories['all_capital'].search(v):
                        categories_set[k]['all_capital'].add(v)
                    elif categories['all_small'].search(v):
                        categories_set[k]['all_small'].add(v)
                    # NOTE: despite the name, this bucket collects values
                    # containing NO digits (search for [0-9]+ returned None)
                    elif categories['unicode_text'].search(v) is None:
                        categories_set[k]['unicode_text'].add(v)
                    else:
                        categories_set[k]['other'].add(v)
    pprint.pprint(categories_set)
def feedback(items):
    """Build an Alfred-style <items> feedback document and print it.

    :param items: one item object or a list of them; each must provide a
        ``get()`` returning {"attrib": {...}, "content": {...}}.

    ``attrib`` entries become XML attributes of the <item>; ``content``
    entries become child elements, except the fileIcon/fileType flags which
    only decorate the <icon> child's "type" attribute.  None values are
    skipped everywhere.

    Fixes: dict.iteritems() (Python-2-only) replaced by items(), and the
    Python-2 print statement parenthesized — both behavior-identical.
    """
    feedback_root = ET.Element("items")

    def process_item(item):
        # purpose: render one item object into a child <item> element
        item_el = ET.SubElement(feedback_root, "item")
        data = item.get()
        for (k, v) in data["attrib"].items():
            if v is None:
                continue
            item_el.set(k, v)
        for (k, v) in data["content"].items():
            if v is None:
                continue
            if k != "fileIcon" and k != "fileType":
                child = ET.SubElement(item_el, k)
                child.text = v
                if k == "icon":
                    # fileIcon/fileType flags only qualify the icon element
                    if "fileIcon" in data["content"].keys():
                        if data["content"]["fileIcon"] == True:
                            child.set("type", "fileicon")
                    if "fileType" in data["content"].keys():
                        if data["content"]["fileType"] == True:
                            child.set("type", "filetype")

    if isinstance(items, list):
        for an_item in items:
            process_item(an_item)
    else:
        process_item(items)
    print(ET.tostring(feedback_root, encoding="utf-8"))
def retornaPlantilla(nombreDirectorioPlantillas,xmlEntradaObject,cantidadAlternativas, tipoPregunta, **kwuargs): #,xmlEntradaObject):
    """Fill each question template with the input item's data and emit it.

    For every template of *tipoPregunta* found in the templates directory,
    the generic output skeleton is populated (type/id attributes, statement
    with the '@termino' placeholder substituted, answer alternatives) and
    either written to ``kwuargs['directorioSalida']`` or printed to stdout.
    """
    #tipoPregunta=nombres.nombreScript(__file__)
    contador=0
    banderaEstado=False
    if 'directorioSalida' in kwuargs.keys():
        banderaEstado=True  # indicates whether the number of outputs should be reported
    for plantilla in recogePlantillas(nombreDirectorioPlantillas,tipoPregunta):
        plantillaSalida=xmlSalida.plantillaGenericaSalida()
        for subRaizSalida in plantillaSalida.iter():
            if subRaizSalida.tag=='plantilla':
                # carry the input item's identity into the output template
                subRaizSalida.set('tipo',xmlEntradaObject.tipo)
                subRaizSalida.set('id',xmlEntradaObject.id)
                subRaizSalida.set('idOrigenEntrada',xmlEntradaObject.idOrigenEntrada)
            if subRaizSalida.tag=='enunciado':
                # substitute the term placeholder in the question statement
                subRaizSalida.text=plantilla.enunciado.replace('@termino',xmlEntradaObject.termino)
            if subRaizSalida.tag=='opciones':
                # one output per group of alternatives
                for conjuntoAlternativas in xmlEntradaObject.agrupamientoAlternativas2(cantidadAlternativas):
                    contador+=1
                    identificadorItem,identificadorAlternativas=xmlSalida.incrustaAlternativasXml(subRaizSalida, conjuntoAlternativas)
                    if banderaEstado==True:
                        xmlSalida.escribePlantilla(kwuargs['directorioSalida'],xmlEntradaObject.tipo, identificadorItem+' '+identificadorAlternativas+' '+str(contador), plantillaSalida,'xml')
                    else:
                        print ET.tostring(plantillaSalida, 'utf-8', method="xml")
    if banderaEstado==True:
        print str(contador)+' Creados'
    pass
def et_to_string(element):
    """Serialize *element* with an XML declaration prepended.

    Asks for a text ("unicode") serialization first; if the running
    ElementTree rejects that encoding name with LookupError, falls back to
    the default serialization.
    """
    header = '<?xml version="1.0" ?>'
    try:
        body = ET.tostring(element, encoding="unicode")
    except LookupError:
        body = ET.tostring(element)
    return header + body
def open_soap_envelope(text):
    """
    :param text: SOAP message
    :return: dictionary with two keys "body"/"header" — "body" is the
        serialized single Body child (or None), "header" a list of
        serialized Header children.
    :raises XmlParseError: if *text* is not parseable XML.
    """
    try:
        # defusedxml guards against entity-expansion attacks on untrusted input
        envelope = defusedxml.ElementTree.fromstring(text)
    except Exception as exc:
        raise XmlParseError("%s" % exc)

    # NOTE(review): structural validation via assert is stripped under -O;
    # consider raising explicitly if that matters for this deployment.
    assert envelope.tag == '{%s}Envelope' % soapenv.NAMESPACE
    assert len(envelope) >= 1
    content = {"header": [], "body": None}

    for part in envelope:
        if part.tag == '{%s}Body' % soapenv.NAMESPACE:
            # exactly one payload element is expected inside the Body
            assert len(part) == 1
            content["body"] = ElementTree.tostring(part[0], encoding="UTF-8")
        elif part.tag == "{%s}Header" % soapenv.NAMESPACE:
            for item in part:
                _str = ElementTree.tostring(item, encoding="UTF-8")
                content["header"].append(_str)

    return content
def show_settings(root, path, option_name='--show-settings'):
    """Print the settings of the component at *path* as indented XML.

    :param root: root of the component tree to search.
    :param path: '/'-separated component path resolved by
        resolve_component_path.
    :param option_name: CLI option name used in error reporting.
    """
    c = resolve_component_path(root, path.split('/'), option_name)
    # build a settings object appropriate for the component's class
    s = msaview.presets.setting_types[c.msaview_classname].from_value(c, msaview.presets)
    e = etree.Element('settings')
    s.encode(e)
    # pretty-print before dumping to stdout
    msaview.preset.indent(e)
    print etree.tostring(e)
def xml_rawquery(self, xml_str, dump_xml=None):
    """
    Accepts xmlQuery String and returns XML response String.
    No object manipulation is done in this method.

    :param xml_str: raw XML request to POST to the IMC '/nuova' endpoint.
    :param dump_xml: when in ImcUtils.AFFIRMATIVE_LIST, echo request and
        response to stdout; None means "use the instance default".
    :return: raw response body string.
    """
    if dump_xml == None:
        dump_xml = self.__dump_xml
    uri = self.uri() + '/nuova'
    if dump_xml in ImcUtils.AFFIRMATIVE_LIST:
        print '%s ====> %s' % (self.__imc, xml_str)
    # round-trip through ElementTree to validate/normalize the request XML
    xml_doc = ET.fromstring(xml_str)
    if self.__nossl:
        req = urllib2.Request(url=uri, data=ET.tostring(xml_doc))
        # SmartRedirectHandler returns [status, location] on 301/302 so the
        # request can be replayed against the redirect target
        opener = urllib2.build_opener(ImcUtils.SmartRedirectHandler())
        resp = opener.open(req)
        if type(resp) is list:
            if len(resp) == 2 and (resp[0] == 302 or resp[0] == 301):
                uri = resp[1]
                req = urllib2.Request(url=uri, data=ET.tostring(xml_doc))
                resp = urllib2.urlopen(req)
        #print "status code is:",f[0]
        #print "location is:", f[1]
    else:
        req = urllib2.Request(url=uri, data=ET.tostring(xml_doc))
        resp = urllib2.urlopen(req)
    rsp = resp.read()
    if dump_xml in ImcUtils.AFFIRMATIVE_LIST:
        print '%s <==== %s' % (self.__imc, rsp)
    return rsp
def to_xml_stream(self, stream, pretty=True, indent=' ', encoding='UTF-8', **kwargs):
    '''Dump object to a file object like stream.

    :param stream: writable file-like object, or a path string (which is
        opened here and closed again in the finally block).
    :param pretty: pretty-print output; uses 4Suite's PrettyPrint when the
        _xmldomext extension is available, otherwise a local indenter.
    :param indent: indentation unit passed to the pretty printer.
    :param encoding: output encoding for both the opened file and the
        XML declaration.
    :param kwargs: forwarded to to_xml_elt().
    :return: the serialized ElementTree element.
    '''
    close = False
    if isinstance(stream, basestring):
        # a path was given: open it ourselves and remember to close it
        close = True
        if _xmldomext:
            stream = file(stream, 'w')
        else:
            stream = codecs.open(stream, mode='w', encoding=encoding, errors='replace')
    try:
        e = self.to_xml_elt(**kwargs)
        if pretty:
            if _xmldomext:
                # 4Suite path: reparse via SAX and pretty-print to the stream
                PrettyPrint(Sax2.Reader().fromString(ElementTree.tostring(e)),
                            stream=stream, encoding=encoding, indent=indent,
                            preserveElements=None)
            else:
                # minidom.parseString(
                #     ElementTree.tostring(e)).writexml(
                #         stream, addindent=indent, newl='\n')
                pretty_indent(e)
                stream.write(ElementTree.tostring(e))
        else:
            d = ElementTree.ElementTree(e)
            #d.write(stream, xml_declaration=True, method="xml")
            d.write(stream, encoding=encoding, xml_declaration=True, method="xml")
    finally:
        if close:
            stream.close()
    return e
def _to_rspec(self, slice): """ See L{GENIAggregate._to_rspec}. """ # get all the reserved nodes reserved = PlanetLabNode.objects.filter( aggregate__pk=self.pk, slice_set=slice) # Get the ids of all reserved nodes node_ids = reserved.values_list("node_id", flat=True) rspec = "%s" % self.rspec # parse the rspec root = et.fromstring(rspec) # get a mapping from node id to node elem (since this version of # elementtree doesn't have XPath working well. node_elems = root.findall(".//node") node_dict = {} for node_elem in node_elems: id = node_elem.get("id", None) if id: node_dict[id] = node_elem # for each node_id in the reservation, find the corresponding # node_elem and add a sliver tag. for node_id in node_ids: node_elem = node_dict[node_id] et.SubElement(node_elem, "sliver") logger.debug("Sending PlanetLab Reservation RSpec:\n%s" % et.tostring(root)) return et.tostring(root)
def addtorow(self, xpath, data, elem=None):
    """Store *data* into the output row column addressed by *xpath*.

    The path is matched against the schema first with namespaces
    (full path), then without (short path).  When *elem* is given, the
    "getall" schema is used and the data becomes the element's serialized
    subtree.  Repeated paths fill numbered variants (path1, path2, ...)
    and overflow is tab-appended to the last variant.
    """
    fullp='/'.join(xpath)
    path=None
    if elem!=None:
        # subtree mode: match against the getall schema
        s=self.schemagetall
        if fullp in s:
            path=fullp
        else:
            shortp=pathwithoutns(xpath)
            if shortp in s:
                path=shortp
        if path == None:
            return
        try:
            # strip the outer wrapper from the serialized subtree if it matches
            data = cleansubtree.match(etree.tostring(elem)).groups()[0]
        except AttributeError:
            data = etree.tostring(elem)
    else:
        s=self.schema
        if fullp in s:
            path=fullp
        else:
            shortp=pathwithoutns(xpath)
            if shortp in s:
                path=shortp
    if path==None:
        # strict mode 2: an undeclared path in the input is a hard error
        if self.strict==2 and elem==None:
            path=xpath
            self.resetrow()
            msg='Undeclared path in XML-prototype was found in the input data. The path is:\n'
            shortp='/'+pathwithoutns(path)
            fullp='/'+'/'.join(path)
            if shortp!=fullp:
                msg+=shortp+'\n'
            msg+=fullp+'\nThe data to insert into path was:\n'+functions.mstr(data)
            raise etree.ParseError(msg)
    else:
        # first value for this column: just store it
        if self.row[s[path][0]]=='':
            self.row[s[path][0]]=data.replace('\t', self.tabreplace)
            return
        # otherwise walk the numbered variants (path1, path2, ...) for a free slot
        i=1
        attribnum=path+'1'
        oldattribnum=path
        while attribnum in s:
            if self.row[s[attribnum][0]]=='':
                self.row[s[attribnum][0]]=data.replace('\t', self.tabreplace)
                return
            i+=1
            oldattribnum=attribnum
            attribnum=path+str(i)
        # all variants full: tab-append to the last one
        self.row[s[oldattribnum][0]]+='\t'+data.replace('\t', self.tabreplace)
def __CoreErrorList(self):
    '''Return a list of error strings describing the pipeline and volume
    elements involved, each serialized as XML with a trailing newline.

    Fixes: the ``not x is None`` anti-idiom is replaced with the standard
    ``x is not None``, and a stray trailing semicolon is removed.
    '''
    s = []
    if self.PipelineNode is not None:
        s.append("Pipeline Element: " + ElementTree.tostring(self.PipelineNode, encoding='utf-8') + '\n')
    if self.VolumeElem is not None:
        s.append("Volume Element: " + ElementTree.tostring(self.VolumeElem, encoding='utf-8') + '\n')
    return s
def test_register(self):
    """Registering the 'foo' namespace must make it appear in serialized XML.

    Fixes: the tree was serialized twice for the two checks; serialize once
    and reuse the result.
    """
    utils.register_namespace('foo', 'http://pulpproject.org/foo')
    root = ET.fromstring(self.DUMMY_XML)
    serialized = ET.tostring(root)
    # if the registration didn't work, the namespace "foo" will instead
    # show up as "ns0"
    self.assertTrue(serialized.find('<foo:dummyelement') >= 0)
    self.assertEqual(serialized.find('ns0'), -1)
def retornaPlantilla(nombreDirectorioPlantillas,xmlEntradaObject,cantidadAlternativas, tipoPregunta,raiz,formato,estilo, **kwuargs): #,xmlEntradaObject):
    """Generate Moodle essay-question XML from traced executions of the input's code.

    For each applicable template, an "essay" output skeleton is built, every
    code sample of the input item is executed and traced, and the resulting
    item is either written into ``kwuargs['directorioSalida']`` (optionally
    wrapped in a <quiz> root) or printed to stdout.  A final status line with
    the number of generated items is printed when writing to disk.
    """
    contador=0
    banderaEstado=False
    enunciado=""
    if 'directorioSalida' in kwuargs.keys():
        banderaEstado=True  # indicates whether the number of outputs should be reported
    for plantilla in recogePlantillas(nombreDirectorioPlantillas,tipoPregunta):
        # skip templates that are not linked to this input item
        if xmlEntradaObject.linkPlantilla(plantilla)==False:
            continue
        plantillaSalida=xmlSalida.plantillaGenericaSalida(xmlEntradaObject.puntaje,xmlEntradaObject.shuffleanswers,xmlEntradaObject.penalty,xmlEntradaObject.answernumbering)
        plantillaSalida.set('tipo',xmlEntradaObject.tipo)  # only item type that changes
        plantillaSalida.set('type',"essay")
        answer=ET.SubElement(plantillaSalida,'answer')
        answer.set('fraction',"0")
        answerFeedback=ET.SubElement(answer,'feedback')
        answerFeedbackText=ET.SubElement(answerFeedback,'text')
        plantillaSalida.set('idOrigenEntrada',xmlEntradaObject.idOrigenEntrada)
        plantillaSalida.set('taxonomia',plantilla.taxo)
        responsefieldlines=ET.SubElement(plantillaSalida,'responsefieldlines')
        responsefieldlines.text="300"
        for codigoPython in xmlEntradaObject.codigos:
            # list of temporary files, one per input nested with the code
            contadorEntradasBruto=0
            glosaEnunciado=""
            for archivoTemporal in codigoPython["codigo"]:
                # execute the temp file and capture its trace
                streamTraza=obtieneTraza(ejecutaPyTemporal(archivoTemporal))
                if len(streamTraza)>0:
                    normalizaLineas(streamTraza)  # normalize number of lines
                else:
                    banderaEstado="No trazable"
                streamTraza=estandarizaLineas(streamTraza,codigoPython["nombreFuncionPrincipal"])  # convert the lines to string format
                idXmlSalida=incluyeInfo(codigoPython,plantillaSalida,contadorEntradasBruto,plantilla.enunciado[:],mergeLineas(streamTraza),answerFeedbackText)
                if banderaEstado==True:
                    #id=str(xmlEntradaObject.idOrigenEntrada)+"."+idXmlSalida
                    id= xmlEntradaObject.idItem(plantilla,tipoPregunta,idXmlSalida)
                    plantillaSalida.set('id',id)
                    # propagate the item id into the <name><text> element
                    for elem in plantillaSalida.getchildren():
                        if elem.tag=='name':
                            for elem2 in elem.iterfind('text'):
                                elem2.text=id
                    if raiz=='quiz':
                        # wrap the item in a <quiz> root before writing
                        quiz = ET.Element('quiz')
                        quiz.append(plantillaSalida)
                        xmlSalida.escribePlantilla2(kwuargs['directorioSalida'],xmlEntradaObject.tipo,id,quiz,'xml',formato,estilo,merge=raiz)
                    else:
                        xmlSalida.escribePlantilla2(kwuargs['directorioSalida'], xmlEntradaObject.tipo,id,plantillaSalida,'xml',formato,estilo,merge=raiz)
                    contador+=1
                elif banderaEstado==False:
                    print ET.tostring(plantillaSalida, 'utf-8', method="xml")
                    contador+=1
                else:
                    # "No trazable": the function or its input failed and cannot be traced
                    print "Error 13: La funcion '"+codigoPython["nombreFuncionPrincipal"] +"' o su entrada: '"+codigoPython["entradasBruto"][contadorEntradasBruto]+"' presenta una falla y no se puede Trazar"
                    banderaEstado=True
                contadorEntradasBruto+=1
    if banderaEstado==True:
        print xmlEntradaObject.idOrigenEntrada+"->"+str(contador)+' Creados'
    pass
def add_sub_counts(self):
    """Propagate word counts up the hyperonym hierarchy as 'subcount' attributes.

    Problem: no senses.
    Since we have only the lemma and the POS, and no word sense, the word
    counts are added to each lexical unit with matching form-spelling and
    form-cat, regardless of its sense (i.e. the value of the "c_seq_nr"
    attribute).  So for example xxx:noun:1 and xxx:noun:2 receive the same
    count value, i.e. count(xxx:noun:1) = n and count(xxx:noun:2) = n.
    Now suppose yyyy:noun:1 is a subsumer of both xxx:noun:1 and xxx:noun:2,
    which happens quite frequently, then its subcount is incremented by n
    twice!  This is not what we want, because it means the contribution of
    words with multiple senses is overestimated.

    Solution:
    We keep track of lexical units already visited while processing this
    word form.  That is, upon visiting yyyy:noun:1 as a subsumer of
    xxx:noun:2, it is skipped.

    Note:
    The problem does not occur across categories (verified).  That is, it
    never happens that xxx:noun and xxx:verb share a common subsumer.  This
    simplifies matters and saves us some administration.
    """
    for lus in self._form2lu.values():
        # dict used as an ordered "seen" set of this form's own lexical units
        visited = dict.fromkeys(lus)
        for lu in lus:
            form = lu.find("form")
            try:
                count = int(form.get("count"))
            except (AttributeError, TypeError):
                # form or count not found
                stderr.write("Warning: no <form> element or 'count' attribute in:\n" + tostring(lu).encode("utf-8") + "\n")
                continue
            if not form.get("subcount"):
                # init subcount to count itself, unless it was already visited as a subsumer
                form.set("subcount", str(count))
            # FIXME: reimplementing this without the overhead of keeping
            # track of the distance would be faster
            successors = self._transitive_closure([lu], "HAS_HYPERONYM").keys()
            for succ_lu in successors:
                # skip subsumers that belong to this same word form (see docstring)
                if succ_lu not in visited:
                    succ_lu_form = succ_lu.find("form")
                    try:
                        succ_count = succ_lu_form.get("count")
                        old_subcount = int(succ_lu_form.get("subcount", succ_count))
                    except (TypeError, AttributeError):
                        # form or count not found
                        stderr.write("Warning: no <form> element or 'count' attribute in:\n" + tostring(succ_lu).encode("utf-8") + "\n")
                        continue
                    new_subcount = old_subcount + count
                    succ_lu_form.set("subcount", str(new_subcount))
def get_prod(prod):
    """Format a grammar <prod> element as a '<lhs> ::= <rhs...>' string.

    Comment children (<com>) are stripped from every <rhs> before the text
    is extracted; runs of whitespace in the right-hand side are collapsed
    via the module-level `spaces` pattern.
    """
    def as_text(el):
        # text-method serialization, decoded from UTF-8 bytes
        return etree.tostring(el, "utf-8", "text").decode("utf-8")

    lhs_el = prod.find("./lhs")
    rhs_els = prod.findall("./rhs")
    for rhs_el in rhs_els:
        for com_el in rhs_el.findall("./com"):
            strip_element(rhs_el, com_el)
    rhs_text = "".join(as_text(el) for el in rhs_els)
    return "%s ::= %s" % (as_text(lhs_el).strip(), spaces.sub(" ", rhs_text).strip())
def append(element, newchild):
    """Append new child ONLY if it's not a duplicate.

    Uses the module-level element_child_cache (parent -> {id: child}) to
    detect id collisions.  Identical duplicates of <external_variable> are
    silently skipped; any other collision aborts the build with an error.
    """
    if element not in element_child_cache:
        element_child_cache[element] = dict()
    newid = newchild.get("id")
    existing = element_child_cache[element].get(newid, None)
    if existing is not None:
        # ID is identical and OVAL entities are identical
        if oval_entities_are_identical(existing, newchild):
            # Moreover the entity is OVAL <external_variable>
            if oval_entity_is_extvar(newchild):
                # If OVAL entity is identical to some already included
                # in the benchmark and represents an OVAL <external_variable>
                # it's safe to ignore this ID (since external variables are
                # in multiple checks just to notify 'testoval.py' helper to
                # substitute the ID with <local_variable> entity when testing
                # the OVAL for the rule)
                pass
            # Some other OVAL entity
            else:
                # If OVAL entity is identical, but not external_variable, the
                # implementation should be rewritten so each entity is present
                # just once
                sys.stderr.write("ERROR: OVAL ID '%s' is used multiple times "
                                 "and should represent the same elements.\n" % (newid))
                sys.stderr.write("Rewrite the OVAL checks. Place the identical "
                                 "IDs into their own definition and extend "
                                 "this definition by it.\n")
                sys.exit(1)
        # ID is identical, but OVAL entities are semantically different =>
        # report an error and exit with failure
        # Fixes: https://github.com/OpenSCAP/scap-security-guide/issues/1275
        else:
            if not oval_entity_is_extvar(existing) and \
               not oval_entity_is_extvar(newchild):
                # This is an error scenario - since by skipping second
                # implementation and using the first one for both references,
                # we might evaluate wrong requirement for the second entity
                # => report an error and exit with failure in that case
                # See
                # https://github.com/OpenSCAP/scap-security-guide/issues/1275
                # for a reproducer and what could happen in this case
                sys.stderr.write("ERROR: it's not possible to use the " +
                                 "same ID: %s " % newid + "for two " +
                                 "semantically different OVAL entities:\n")
                sys.stderr.write("First entity %s\n" % ElementTree.tostring(existing))
                sys.stderr.write("Second entity %s\n" % ElementTree.tostring(newchild))
                sys.stderr.write("Use different ID for the second entity!!!\n")
                sys.exit(1)
    else:
        # fresh id: append for real and remember it in the cache
        element.append(newchild)
        element_child_cache[element][newid] = newchild
def submit_to_prj(self, act, project, force_enable_build=False):
    """
    Links sources from request to project
    :param act: action for submit request
    :param project: project to link into
    :param force_enable_build: overwrite the ring criteria to enable or
                               disable the build
    :return: the target package name.
    """
    src_prj = act.src_project
    src_rev = act.src_rev
    src_pkg = act.src_package
    tar_pkg = act.tgt_package

    disable_build = False
    # The force_enable_build will avoid the
    # map_ring_package_to_subproject
    if not force_enable_build:
        if self.crings and not self.ring_packages.get(tar_pkg) and not self.is_adi_project(project):
            disable_build = True
            logging.warning("{}/{} not in ring, build disabled".format(project, tar_pkg))
        else:
            # NOTE(review): the comment above says map_ring_package_to_subproject
            # but the call is map_ring_package_to_subject — confirm which name
            # the class actually defines
            project = self.map_ring_package_to_subject(project, tar_pkg)

    self.create_package_container(project, tar_pkg, disable_build=disable_build)

    # expand the revision to a md5
    url = self.makeurl(['source', src_prj, src_pkg], {'rev': src_rev, 'expand': 1})
    f = http_GET(url)
    root = ET.parse(f).getroot()
    src_rev = root.attrib['srcmd5']
    src_vrev = root.attrib.get('vrev')

    # link stuff - not using linkpac because linkpac copies meta
    # from source
    root = ET.Element('link', package=src_pkg, project=src_prj, rev=src_rev)
    if src_vrev:
        root.attrib['vrev'] = src_vrev
    url = self.makeurl(['source', project, tar_pkg, '_link'])
    http_PUT(url, data=ET.tostring(root))

    # create links for every sub-package of the target package
    for sub_prj, sub_pkg in self.get_sub_packages(tar_pkg):
        sub_prj = self.map_ring_package_to_subject(project, sub_pkg)
        # print project, tar_pkg, sub_pkg, sub_prj
        if sub_prj == project:  # skip inner-project links
            continue
        self.create_package_container(sub_prj, sub_pkg)

        root = ET.Element('link', package=tar_pkg, project=project)
        url = self.makeurl(['source', sub_prj, sub_pkg, '_link'])
        http_PUT(url, data=ET.tostring(root))

    return tar_pkg
def test_graph_attr_unmarshal():
    """Round-trip check: build an <attribute> element, unmarshal it with
    GraphAttribute.fromElement, and print both serializations for eyeballing.

    Fixes: Python-2-only print statements are replaced with the function
    form, which behaves identically for a single argument on Python 2.
    """
    attr_element = et.Element('attribute',
                              {'name': 'test_attr', 'id': 'test_id',
                               'value': '100', 'hidden': 'true'})
    attr_element.text = '100 bp'
    attr = GraphAttribute.fromElement(attr_element)
    print('Original: ')
    print(et.tostring(attr_element))
    print('Unmarshalled: ')
    print(attr.toXml())
def printNode(self, removeDuplicates):
    """
    Prints the cElementTree node of the document. For debugging only.

    @type removeDuplicates: boolean
    @param removeDuplicates: merge duplicates?
    """
    doc_node = self.getNode(removeDuplicates)
    # pretty-print in place before serializing
    indent(doc_node)
    print(ET.tostring(doc_node))
def _action(self, name, numargs, additional, *args):
    """Produces the data and transceives it.

    Builds a <transmission><reflect><action name=...> document from the
    reflected arguments, sends it via self._transceive, and decodes the
    <return-value> element from the response.

    :raises TypeError: wrong argument count, or malformed success response.
    :raises IOError: empty response or missing return-value element.
    :raises JavaReflectionException: remote-side failure.

    Fix: ``if not respelem`` used Element truthiness, which is False for a
    present-but-childless element (a documented ElementTree pitfall), so an
    empty <return-value> was misreported as missing.  Test against None.
    """
    if (len(args) < numargs) or (not additional and len(args) != numargs):
        raise TypeError(
            name + "() takes exactly " + str(numargs) + " argument" +
            ("" if numargs == 1 else "s") + "(" + str(len(args)) + " given)"
        )
    # Create document
    transmission = etree.Element("transmission")
    reflect = etree.Element("reflect")
    action = etree.Element("action", name=name)
    transmission.append(reflect)
    reflect.append(action)
    for arg in args[:numargs]:
        action.append(ReflectedTypeFactory(arg, self).to_element())
    if additional:
        # extra arguments beyond the fixed count go into an <arguments> wrapper
        addelem = etree.Element("arguments")
        action.append(addelem)
        for arg in args[numargs:]:
            addelem.append(ReflectedTypeFactory(arg, self).to_element())
    if self._debug:
        logsend.debug(etree.tostring(transmission, encoding="UTF-8"))
    # We must specify the encoding, or we won't get the <?xml ?> declaration
    response = self._transceive(etree.tostring(transmission, encoding="UTF-8"))
    if self._debug:
        logrecv.debug(response)
    if response:
        respelem = etree.fromstring(response).find("reflect/return-value")
        # Check we got back what we expected (None means the element is absent;
        # truthiness would also be False for an empty element)
        if respelem is None:
            raise IOError("Transmission XML response does not contain return-value")
        # Process it based on whether it was a success or failure
        if respelem.get("type") == "success" and len(respelem) == 1:
            return ElementToReflectedType(respelem[0], self)
        elif respelem.get("type") == "success" and len(respelem) != 1:
            raise TypeError("Success response does not have exactly one response element")
        else:
            raise JavaReflectionException(respelem.get("errormsg", "Unknown error occurred"))
    else:
        raise IOError(1, "Empty response retrieved from action")
def check_weird_keys(filename):
    """Scan an OSM XML file and print elements carrying known-problematic tag keys.

    For every <node>/<way> tag whose key is listed in the per-tag blacklist,
    the whole element, the key/value pair, and a blank separator line are
    printed.

    Fixes: Python-2-only print statements replaced with the function form,
    and ``elem.tag in weirdkeys.keys()`` replaced by membership on the dict
    itself (identical semantics, no intermediate key list).
    """
    weirdkeys = {'node': ['CODIGO', 'FIXME', 'naptan:CommonName', 'naptan:Indicator',
                          'naptan:Street', 'ref:RRG'],
                 'way': ['FIXME', 'N', u'Torre\xf3n del castillo de los Salazar', 'fuel:']}
    for event, elem in ET.iterparse(filename):
        if elem.tag in weirdkeys:
            for elem_tag in elem.findall('tag'):
                k = elem_tag.attrib['k']
                if k in weirdkeys[elem.tag]:
                    print(ET.tostring(elem))
                    print(k, elem_tag.attrib['v'])
                    print('')
def trans_json(json_ltp=[]):
    """Convert LTP dependency-parse JSON into sentence-analysis objects.

    Walks each sentence's token dicts, finds the HED (head) and coordinated
    verbs, builds trunk logical-semantic elements, attaches second/third
    level dependents, looks up thesaurus classes for each word, and finally
    builds a sen_ana object per sentence into the global phsen_ana list,
    which is also returned.

    NOTE(review): mutable default argument `json_ltp=[]` — risky if the
    default list is ever mutated; consider a None sentinel.
    """
    # declare module-level caches (sentence analyses, word lists, word analyses)
    global phsen_ana, env_ws, envw_ana
    phsen_ana = []
    logic_sens = []
    logic_ele = {}  # trunk-node logical-semantic elements, incl. dependent 2nd/3rd-level semantics
    json_str = ''
    for sen_dics in json_ltp[0]:  # one analysis-dict list per sentence; currently only one paragraph arrives
        logic_sens.append("")  # start a fresh logical-semantics string per sentence
        logic_ele = {}  # trunk logical elements within each sentence
        tree_line = []  # per word: the id path needed to reach HED
        # word-information classification (structure: sen_ana)
        thesen_ana = sen_ana()  # initialize the sentence-analysis object
        thew_anas = []  # word-analysis objects for this sentence's words
        the_ws = []  # word list used for thesaurus (Cilin) classification
        list_HED = []  # CV (coordinated verbs) are treated as HED-level too; core relation HED
        wgrs = []  # dependency strings, one temporary list per sentence
        for h_dic in sen_dics:  # first pass: locate 'HED'; coordinated verbs get equal status
            t_wo = h_dic['cont']  # original token text
            w_l = min(2, len(t_wo) - 1)  # bucket words of length 1, 2, 3+
            t_r = h_dic['relate']
            if t_r == 'HED':
                list_HED.append(h_dic['id'])
                logic_ele[h_dic['id']] = 'V=' + t_wo
            elif t_r == 'COO':
                if h_dic['pos'] == 'v':
                    h_dic['relate'] = 'COV'  # mark coordinated verbs separately
                    list_HED.append(h_dic['id'])
            elif t_r == 'WP':
                # merge a following closing quote into this punctuation token
                t_id = h_dic['id']
                if t_id < len(sen_dics) - 1:
                    if sen_dics[t_id + 1]['cont'] == '”':
                        sen_dics[t_id + 1]['cont'] = ' '
                        h_dic['cont'] += '”'
            Pos_ = PosTag.pos_cn(h_dic['pos'])
            thisw_ana = w_ana(wo=t_wo, pos=Pos_)  # construct and load the new word's info
            # function words (particles/prepositions/conjunctions/person & place names)
            # are self-classifying: cache them directly
            if '助词介词连词人名地名'.find(thisw_ana.pos) > -1:
                thisw_ana.syn = t_wo
                thisw_ana.cla = Pos_
                env_ws[w_l].append(t_wo)
                envw_ana[w_l].append(thisw_ana)
            elif thisw_ana.pos == '方位名词':  # locative nouns likewise
                thisw_ana.syn = t_wo
                thisw_ana.cla = Pos_
                env_ws[w_l].append(t_wo)
                envw_ana[w_l].append(thisw_ana)
            thew_anas.append(thisw_ana)
        tsen_in = ''
        for w_dic in sen_dics:  # second pass: first-level trunk dependencies
            json_str += str(w_dic) + '\n'
            the_wo = w_dic['cont']
            tsen_in += the_wo + ' '
            the_ws.append(the_wo)
            this_line = ""
            this_wgr = ''
            if w_dic['relate'] == 'HED':
                this_line = '-1_'
            if w_dic['parent'] in list_HED:
                # trunk constituents tied to HED: subject/object, direct predicate,
                # coordinated predicate CV (parent is an HED id)
                t_r = w_dic['relate']
                if t_r in Yufa_dic:
                    logic_ele[w_dic['id']] = Yufa_dic[w_dic['relate']] + '=' + w_dic['cont']  # add trunk semantic element first
                    if t_r == 'VOB':
                        print(the_wo)
                        # when the upper dependency is SBV and dependents contain
                        # SVB/complement/attachment, this object is also a predicate: add a V
                        t_id = w_dic['id']
                        for w_d in sen_dics:
                            if w_d['parent'] == t_id:
                                if 'VOBCMPRAD'.find(w_d['relate']) > -1:
                                    logic_ele[w_dic['id']] = logic_ele[w_dic['id']].replace('=', 'V=')
                    this_line += str(w_dic['id']) + '_'
                    this_wgr += w_dic['cont'] + '_'
                else:
                    print('========' + w_dic['relate'])
            else:
                par = w_dic['parent']
                # use tree_line to record the dependency path
                while not (par in list_HED):  # follow parent links until an HED id is found
                    if this_line.find('-1') > -1:
                        break
                    this_line = str(par) + '_' + this_line
                    this_wgr += sen_dics[par]['cont'] + '_'
                    par = sen_dics[par]['parent']
                this_line += str(w_dic['id']) + '_'
                this_wgr += w_dic['cont'] + '_'
            tree_line.append(this_line.strip('_'))  # dependency-path id string
            wgrs.append(this_wgr)  # semantic-link string matching the id string
        cizu_temp = {}
        ii = 0  # loop helper counter
        for w_dic in sen_dics:
            # third pass: second-level dependents per the path description;
            # second level is attached with a tag and colons, third level is
            # joined in sequence
            tree_node = tree_line[ii].split('_')
            if tree_node[0] == '-1':
                del tree_node[0]
            if len(tree_node) > 1:
                t_key = tree_node[0] + '_' + tree_node[1]
                if t_key in cizu_temp:
                    # keyed by the first two levels; value is the phrase, written into the dict
                    cizu_temp[t_key] = cizu_temp[t_key] + '+' + w_dic['cont']
                else:
                    cizu_temp[t_key] = w_dic['cont']
            ii = ii + 1
        for key in cizu_temp:
            # first key part = level-1 id (indexes logic_ele), second = level-2 id,
            # attached with a double colon
            any_rela = sen_dics[int(key.split('_')[1])]['relate']
            logic_ele[int(key.split('_')[0])] += '::' + Yufa_dic[any_rela] + '=' + cizu_temp[key]
        for key in logic_ele:
            logic_sens[-1] += logic_ele[key] + ','
        # text-file lookup variant:
        #dics = cla_search (the_ws)  # classification function returns two dicts (syn and cla)
        # database lookup variant:
        dics = newCla_search(the_ws)  # classification function returns two dicts (syn and cla)
        ww = 0
        for wa_dic in sen_dics:  # load the paragraph-analysis "train" info
            t_wo = wa_dic['cont']
            w_l = min(2, len(t_wo) - 1)  # bucket words of length 1, 2, 3+
            thew_anas[ww].wgr = wgrs[ww]
            if thew_anas[ww].syn == '':  # function words were preloaded; skip the type search for them
                if t_wo in env_ws[w_l]:
                    e_id = env_ws[w_l].index(t_wo)
                    thew_anas[ww].syn = envw_ana[w_l][e_id].syn
                    thew_anas[ww].cla = envw_ana[w_l][e_id].cla
                elif t_wo in dics[0]:
                    thew_anas[ww].syn = dics[0][t_wo]
                    thew_anas[ww].cla = dics[1][t_wo]
                    env_ws[w_l].append(thew_anas[ww].wo)
                    envw_ana[w_l].append(thew_anas[ww])
            ww = ww + 1
        thesen_ana = sen_ana(sen_in=tsen_in.strip(' '),
                             sen_mean=logic_sens[-1], w_anas=thew_anas)
        phsen_ana.append(thesen_ana)
    logsen_fix(logic_sens)
    wr_logic = ''
    wr_sen = ''
    time1 = str(time.time())
    new = False
    if new:
        # optional (currently disabled) dump of the paragraph semantics to an
        # XML template file; tag write-in and template insertion differ a lot,
        # so they are handled separately
        for sen in phsen_ana:
            wr_logic += sen.sen_mean + '\n'
            wr_sen += sen.sen_in
        tree0 = ET.ElementTree(file='d:/xml/段落语义ltp.xml')
        tree0.getroot()
        root = tree0.getroot()  # take a template from the file, add a root node, then write to the target doc
        xx = ET.tostring(root.find('阅读场景'), encoding='UTF-8', method="xml")
        yy = ET.fromstring(xx)
        yy.find('句式语义').text = wr_logic
        yy.find('原文').text = wr_sen
        yy.find('依存句法').text = json_str
        root.insert(1, yy)
        tree0.write('d:/xml/段落语义ltp.xml', encoding='UTF-8')
    set_phsen(ph=phsen_ana)
    return phsen_ana
def __init__(self, filename=None, run=None, overwrite=False):
    """Prepare an mzML writer seeded from the file referenced by *run*.

    Copies the header portion of the original mzML file (everything before
    the ``<run>`` element) into a new XML tree, injecting a pymzML
    ``<software>`` tag after the ``softwareList`` line, and caches skeleton
    ``run``/``spectrumList`` elements from the original file for later use.

    :param filename: output filename; stored on the instance but not used here
    :param run: parsed run object; ``run.info['filename']`` must point at the
        source mzML (plain or gzipped)
    :param overwrite: accepted for API compatibility
        # NOTE(review): `overwrite` is never read in this method — confirm it
        # is consumed elsewhere or dead.
    """
    # Register the default mzML namespace so serialization emits no prefix.
    cElementTree.register_namespace("", "http://psi.hupo.org/ms/mzml")
    self.filename = filename
    self.lookup = {}            # skeleton elements harvested from the source file
    self.newTree = None         # header-only XML tree built below
    self.TreeBuilder = cElementTree.TreeBuilder()
    self.run = run
    self.info = {'counters': ddict(int)}
    # Choose a text reader for the source: transparent gzip or plain open.
    if self.run.info['filename'].endswith('.gz'):
        import gzip
        import codecs
        io = codecs.getreader("utf-8")(gzip.open(self.run.info['filename']))
    else:
        io = open(self.run.info['filename'], 'r')
    #read the rest as original file
    input_xml_string = ''
    pymzml_tag_written = False
    #open again to read as text!
    # NOTE(review): this second handle is never closed, and a gzipped source
    # would be read here as raw bytes — looks like only plain files are
    # expected on this path; confirm.
    for line in open(self.run.info['filename'], 'r').readlines():
        if 'indexedmzML' in line:
            # writing of indexed mzML is not possible at the moment
            continue
        if 'run' in line:
            # the run is appended from the original parser to avoid messing
            # with the new xml tree, we break before the run data starts
            break
        input_xml_string += line
        if 'softwareList' in line and pymzml_tag_written is False:
            # Advertise pymzML itself as processing software right after the
            # softwareList opening tag.
            addon = cElementTree.Element(
                'software', {'id': 'pymzML', 'version': "0.7.6"}
            )
            cElementTree.SubElement(
                addon, 'cvParam', {
                    'accession': 'MS:1000531',
                    'cvRef': 'MS',
                    'name': 'pymzML Writer',
                    'version': '0.7.6',
                }
            )
            # NOTE(review): under Python 3 tostring() returns bytes here,
            # which cannot be += to a str — presumably Python 2 era code.
            new_line = cElementTree.tostring(addon, encoding='utf-8')
            input_xml_string += new_line
            pymzml_tag_written = True
    # Close the truncated document so it parses as a standalone tree.
    input_xml_string += '</mzML>\n'
    self.newTree = cElementTree.fromstring(input_xml_string)
    # Stream-parse the source until the spectrumList start tag, keeping empty
    # clones of the run/spectrumList elements (attributes only, no children).
    for event, element in cElementTree.iterparse(io, events=(b'start', b'end')):
        if event == b'start':
            if element.tag.endswith('}run'):
                self.lookup['run'] = cElementTree.Element(element.tag, element.attrib)
            if element.tag.endswith('}spectrumList'):
                self.lookup['spectrumList'] = \
                    cElementTree.Element(element.tag, element.attrib)
                self.lookup['spectrumIndeces'] = \
                    cElementTree.Element('index', {'name': 'spectrum'})
                break
    return
def node_text(n):
    """Serialize node *n* to HTML markup; plain/text nodes fall back to str().

    lxml raises TypeError when asked to serialize a non-element (e.g. a bare
    string result from an XPath query), in which case the node itself is
    stringified.
    """
    serialize = et.tostring
    try:
        rendered = serialize(n, method='html', with_tail=False)
    except TypeError:
        rendered = str(n)
    return rendered
def _convert(tree: Element) -> str: return unescape(tostring(tree, encoding="unicode"))
def pretty_XML(xml):
    """Return the ElementTree element *xml* as a tab-indented XML string."""
    flat = ET.tostring(xml)
    document = minidom.parseString(flat)
    return document.toprettyxml(indent='\t')
def write(self, stream, nodes, mode=MeshWriter.OutputMode.BinaryMode):
    """Write *nodes* to *stream* as a 3MF (OPC zip) package.

    Produces three archive members: the Savitar-serialized scene model
    (``3D/3dmodel.model``), the OPC content-types manifest, and the package
    relationships file.

    :param stream: writable binary stream the zip archive is built on
    :param nodes: scene nodes to export; the scene root expands to its children
    :param mode: accepted for the MeshWriter interface
        # NOTE(review): `mode` is never read here — binary output is implied.
    :return: True on success, False on any exception (which is logged).
    """
    self._archive = None  # Reset archive
    archive = zipfile.ZipFile(stream, "w", compression=zipfile.ZIP_DEFLATED)
    try:
        model_file = zipfile.ZipInfo("3D/3dmodel.model")
        # Because zipfile is stupid and ignores archive-level compression settings when writing with ZipInfo.
        model_file.compress_type = zipfile.ZIP_DEFLATED

        # Create content types file
        content_types_file = zipfile.ZipInfo("[Content_Types].xml")
        content_types_file.compress_type = zipfile.ZIP_DEFLATED
        content_types = ET.Element("Types", xmlns=self._namespaces["content-types"])
        rels_type = ET.SubElement(
            content_types, "Default", Extension="rels",
            ContentType="application/vnd.openxmlformats-package.relationships+xml")
        model_type = ET.SubElement(
            content_types, "Default", Extension="model",
            ContentType="application/vnd.ms-package.3dmanufacturing-3dmodel+xml")

        # Create _rels/.rels file
        relations_file = zipfile.ZipInfo("_rels/.rels")
        relations_file.compress_type = zipfile.ZIP_DEFLATED
        relations_element = ET.Element(
            "Relationships", xmlns=self._namespaces["relationships"])
        model_relation_element = ET.SubElement(
            relations_element, "Relationship", Target="/3D/3dmodel.model",
            Id="rel0",
            Type="http://schemas.microsoft.com/3dmanufacturing/2013/01/3dmodel")

        # Copy scene-level metadata into the Savitar scene.
        savitar_scene = Savitar.Scene()
        metadata_to_store = CuraApplication.getInstance().getController(
        ).getScene().getMetaData()
        for key, value in metadata_to_store.items():
            savitar_scene.setMetaDataEntry(key, value)
        current_time_string = datetime.datetime.now().strftime(
            "%Y-%m-%d %H:%M:%S")
        if "Application" not in metadata_to_store:
            # This might sound a bit strange, but this field should store the original application that created
            # the 3mf. So if it was already set, leave it to whatever it was.
            savitar_scene.setMetaDataEntry(
                "Application",
                CuraApplication.getInstance().getApplicationDisplayName())
        if "CreationDate" not in metadata_to_store:
            savitar_scene.setMetaDataEntry("CreationDate", current_time_string)
        savitar_scene.setMetaDataEntry("ModificationDate", current_time_string)

        # First step: swap Y and Z axes (3MF is Z-up, Cura is Y-up) by writing
        # a fixed rotation into the matrix data directly.
        transformation_matrix = Matrix()
        transformation_matrix._data[1, 1] = 0
        transformation_matrix._data[1, 2] = -1
        transformation_matrix._data[2, 1] = 1
        transformation_matrix._data[2, 2] = 0

        global_container_stack = Application.getInstance(
        ).getGlobalContainerStack()
        # Second step: 3MF defines the left corner of the machine as center, whereas cura uses the center of the
        # build volume.
        if global_container_stack:
            translation_vector = Vector(
                x=global_container_stack.getProperty(
                    "machine_width", "value") / 2,
                y=global_container_stack.getProperty(
                    "machine_depth", "value") / 2,
                z=0)
            translation_matrix = Matrix()
            translation_matrix.setByTranslation(translation_vector)
            transformation_matrix.preMultiply(translation_matrix)

        # Convert each requested node (or, for the scene root, each of its
        # children) into a Savitar node and add it to the scene.
        root_node = UM.Application.Application.getInstance().getController(
        ).getScene().getRoot()
        for node in nodes:
            if node == root_node:
                for root_child in node.getChildren():
                    savitar_node = self._convertUMNodeToSavitarNode(
                        root_child, transformation_matrix)
                    if savitar_node:
                        savitar_scene.addSceneNode(savitar_node)
            else:
                savitar_node = self._convertUMNodeToSavitarNode(
                    node, transformation_matrix)
                if savitar_node:
                    savitar_scene.addSceneNode(savitar_node)

        # Serialize the scene and write all three archive members.
        parser = Savitar.ThreeMFParser()
        scene_string = parser.sceneToString(savitar_scene)
        archive.writestr(model_file, scene_string)
        archive.writestr(
            content_types_file,
            b'<?xml version="1.0" encoding="UTF-8"?> \n' +
            ET.tostring(content_types))
        archive.writestr(
            relations_file,
            b'<?xml version="1.0" encoding="UTF-8"?> \n' +
            ET.tostring(relations_element))
    except Exception as e:
        # Deliberately broad: any failure is reported via the UI error string.
        Logger.logException("e", "Error writing zip file")
        self.setInformation(
            catalog.i18nc("@error:zip", "Error writing 3mf file."))
        return False
    finally:
        # Either hand the open archive to the caller via self._archive or
        # close it, depending on the store flag.
        if not self._store_archive:
            archive.close()
        else:
            self._archive = archive
    return True
def to_xml(self):
    """Build and return the libvirt domain XML for this VM as a string.

    Raises:
        InvalidVMConfigError: when the configuration fails self.__check().
    """
    if not self.__check():
        raise InvalidVMConfigError

    # <domain type='kvm'> root with the VM name.
    domain = ET.Element('domain')
    domain.set('type', 'kvm')
    name_elem = ET.SubElement(domain, 'name')
    name_elem.text = self.name

    # <os>: direct-kernel boot (rumpkernel-style unikernel) with a JSON-ish
    # guest config appended to the serial-console kernel command line.
    os_elem = ET.SubElement(domain, 'os')
    type_elem = ET.SubElement(os_elem, 'type')
    type_elem.text = 'hvm'
    kernel_elem = ET.SubElement(os_elem, 'kernel')
    kernel_elem.text = self.image_path
    cmdline_elem = ET.SubElement(os_elem, 'cmdline')
    cmdline_elem.text = 'console=ttyS0' + ('''{,, "blk" : {,, "source": "dev",, "path": "/dev/ld0a",, "fstype": "blk",, "mountpoint": "%s",, },, "net" : {,, "if": "vioif0",, "type": "inet",, "method": "static",, "addr": "10.0.120.101",, "mask": "24",, },, "cmdline": "%s",, },,''' % (self.data_volume_mount_point, self.cmdline))

    mem_elem = ET.SubElement(domain, 'memory')
    mem_elem.text = str(self.memory_size)

    # <devices>: one raw virtio data disk backed by the data volume file.
    devices_elem = ET.SubElement(domain, 'devices')
    disk_elem = ET.SubElement(devices_elem, 'disk')
    disk_elem.set('type', 'file')
    disk_elem.set('device', 'disk')
    source_elem = ET.SubElement(disk_elem, 'source')
    source_elem.set('file', self.data_volume_path)
    disk_target = ET.SubElement(disk_elem, 'target')
    disk_target.set('dev', 'sda')
    disk_target.set('bus', 'virtio')
    driver_elem = ET.SubElement(disk_elem, 'driver')
    driver_elem.set('type', 'raw')
    driver_elem.set('name', 'qemu')

    # For debugging: pty-backed serial port and matching console device.
    serial_elem = ET.SubElement(devices_elem, 'serial')
    serial_elem.set('type', 'pty')
    serial_target = ET.SubElement(serial_elem, 'target')
    serial_target.set('port', '0')
    console_elem = ET.SubElement(devices_elem, 'console')
    console_elem.set('type', 'pty')
    console_target = ET.SubElement(console_elem, 'target')
    console_target.set('port', '0')
    console_target.set('type', 'serial')

    return ET.tostring(domain).decode()
def enable_baselibs_packages(self, force=False, wipebinaries=False):
    """Synchronize per-arch build enable/disable flags in OBS package metas.

    For every known package, decide whether builds for self.arch must be
    disabled (non-biarch packages) or enabled (biarch packages, or all when
    *force* is set), edit the package ``_meta`` XML accordingly, and PUT the
    changed metas back to the build service.

    :param force: treat every package as allowed to build for self.arch
    :param wipebinaries: after disabling a package, wipe its built binaries
    """
    self._init_biarch_packages()
    todo = dict()  # pkg name -> modified meta element, uploaded at the end
    for pkg in self.packages:
        logger.debug("processing %s", pkg)
        if not pkg in self.package_metas:
            logger.error("%s not found", pkg)
            continue
        pkgmeta = self.package_metas[pkg]
        # Tri-state flags: None = unknown/unset.
        is_enabled = None
        is_disabled = None
        has_baselibs = None  # NOTE(review): never assigned below — dead.
        must_disable = None
        changed = None
        for n in pkgmeta.findall("./build/enable[@arch='{}']".format(self.arch)):
            is_enabled = True
        for n in pkgmeta.findall("./build/disable[@arch='{}']".format(self.arch)):
            is_disabled = True
        if force:
            must_disable = False
        if must_disable is None:
            # Only packages in the biarch set may build for this arch.
            if self.is_biarch_recursive(pkg):
                must_disable = False
            else:
                must_disable = True
        if must_disable == False:
            if is_disabled:
                logger.info('enabling %s for %s', pkg, self.arch)
                # Enable by removing the per-arch <disable> tags.
                for build in pkgmeta.findall("./build"):
                    for n in build.findall("./disable[@arch='{}']".format(self.arch)):
                        build.remove(n)
                        changed = True
                if changed == False:
                    # NOTE(review): unreachable — `changed` is only ever None
                    # or True at this point, so this error can never fire.
                    logger.error('build tag not found in %s/%s!?', pkg, self.arch)
            else:
                logger.debug('%s already enabled for %s', pkg, self.arch)
        elif must_disable == True:
            if not is_disabled:
                logger.info('disabling %s for %s', pkg, self.arch)
                # Create the <build> section on demand, then add <disable>.
                bn = pkgmeta.find('build')
                if bn is None:
                    bn = ET.SubElement(pkgmeta, 'build')
                ET.SubElement(bn, 'disable', {'arch': self.arch})
                changed = True
            else:
                logger.debug('%s already disabled for %s', pkg, self.arch)
        if is_enabled:
            # An explicit per-arch <enable> is redundant either way; drop it.
            logger.info('removing explicit enable %s for %s', pkg, self.arch)
            for build in pkgmeta.findall("./build"):
                for n in build.findall("./enable[@arch='{}']".format(self.arch)):
                    build.remove(n)
                    changed = True
            if changed == False:
                # NOTE(review): same dead check as above.
                logger.error('build tag not found in %s/%s!?', pkg, self.arch)
        if changed:
            todo[pkg] = pkgmeta
    if todo:
        logger.info("applying changes")
        for pkg in sorted(todo.keys()):
            pkgmeta = todo[pkg]
            try:
                pkgmetaurl = self.makeurl(['source', self.project, pkg, '_meta'])
                self.http_PUT(pkgmetaurl, data=ET.tostring(pkgmeta))
                if self.caching:
                    self._invalidate__cached_GET(pkgmetaurl)
                # Wipe stale binaries for packages that are now disabled.
                if wipebinaries and pkgmeta.find("./build/disable[@arch='{}']".format(self.arch)) is not None:
                    logger.debug("wiping %s", pkg)
                    self.http_POST(self.makeurl(['build', self.project], {
                        'cmd': 'wipe',
                        'arch': self.arch,
                        'package': pkg
                    }))
            except urllib2.HTTPError as e:
                # Keep going: one failed upload shouldn't abort the batch.
                logger.error('failed to update %s: %s', pkg, e)
def write_gexf(graph, node_tools=GEXFNodeTools, edge_tools=GEXFEdgeTools):
    """
    Export a graph to an GEXF data format

    Custom XML serializers may be introduced as a custom NodeTools class
    using the `node_tools` attribute. In addition, the graph ORM may be
    used to inject tailored `serialize` methods in specific nodes or edges.

    :param graph:      Graph to export
    :type graph:       :graphit:Graph
    :param node_tools: NodeTools class with node serialize method
    :type node_tools:  :graphit:NodeTools
    :param edge_tools: EdgeTools class with edge serialize method
    :type edge_tools:  :graphit:EdgeTools

    :raises GraphitException: if node_tools/edge_tools do not inherit from
        NodeTools/EdgeTools respectively

    :return: Graph exported as a hierarchical XML node structure
    :rtype:  :py:str
    """
    # Set current NodeTools and EdgeTools aside and register new one
    if not issubclass(node_tools, NodeTools):
        raise GraphitException(
            'Node_tools ({0}) needs to inherit from the NodeTools class'.
            format(type(node_tools)))
    if not issubclass(edge_tools, EdgeTools):
        # Fixed message: this check concerns EdgeTools, not NodeTools.
        raise GraphitException(
            'Edge_tools ({0}) needs to inherit from the EdgeTools class'.
            format(type(edge_tools)))
    curr_nt = graph.node_tools
    curr_et = graph.edge_tools
    graph.node_tools = node_tools
    graph.edge_tools = edge_tools

    # try/finally guarantees the graph's original tools are restored even if
    # a node/edge serialize call raises.
    try:
        # Create GEXF root element and add meta-data
        root = et.Element('gexf')
        root.attrib = {
            'xmlns': 'http://www.gexf.net/1.2draft',
            'version': '1.2',
            # Fixed default namespace URI typo (was 'http://www.w3/org/...').
            'xmlns:xsi': graph.data.get('xmlns:xsi',
                                        'http://www.w3.org/2001/XMLSchema-instance'),
            'xsi.schemaLocation': graph.data.get(
                'xsi.schemaLocation',
                'http://www.gexf.net/1.2draft http://www.gexf.net/1.2draft/gexf.xsd'
            )
        }
        root_meta = et.SubElement(root, 'meta')
        root_meta.attrib = {'lastmodifieddate': str(datetime.date.today())}
        for key, value in graph.data.items():
            if key not in ('mode', 'lastmodifieddate'):
                meta = et.SubElement(root_meta, key)
                meta.text = str(value)

        root_graph = et.SubElement(root, 'graph')
        root_graph.attrib = {
            'mode': graph.data.get('mode', 'static'),
            'defaultedgetype': 'directed' if graph.directed else 'undirected'
        }

        # Add nodes
        if len(graph.nodes):
            root_nodes = et.SubElement(root_graph, 'nodes')
            for node in graph.iternodes():
                node.serialize(tree=root_nodes)

        # Add edges
        if len(graph.edges):
            root_edges = et.SubElement(root_graph, 'edges')
            for edge in graph.iteredges():
                edge.serialize(tree=root_edges)
    finally:
        # Restore original NodeTools and EdgeTools
        graph.node_tools = curr_nt
        graph.edge_tools = curr_et

    # Return pretty printed XML using minidom.parseString
    return minidom.parseString(et.tostring(root)).toprettyxml(indent="  ")
def XML_prettystring(XML):
    """Indent the ElementTree *XML* in place and return its serialized form."""
    root = XML.getroot()
    indent(root)
    return etree.tostring(root)
def loadNewFile(inputfile, con, cursor, sessionID, timestep):
    """Load a scenario XML file into the database and run inference over it.

    Parses *inputfile*, registers domain constants/predicates and agents,
    stores scenarios, trust relations, beliefs (facts and rules), then for
    each query processes trust, infers beliefs for every agent, and argues
    the question.

    Python 2 code. Inserts use try/commit/except/rollback as deliberate
    best-effort (e.g. duplicate rows are silently skipped).

    :param inputfile: path of the scenario XML file
    :param con, cursor: open DB connection and cursor (MySQL-style %s params)
    :param sessionID, timestep: scoping keys written with every row
    """
    file = open(inputfile, 'r')
    #convert to string:
    data = file.read()
    #close file because we dont need it anymore:
    file.close()
    tree = et.fromstring(data)
    # resetDB(con,cursor)

    # Register domain vocabulary and agents (helpers insert-or-lookup by name).
    for el in tree.findall('domain/constant'):
        getConstantID(con, cursor, el.text)
    for el in tree.findall('domain/predicate'):
        getPredicateID(con, cursor, el.text)
    for el in tree.findall('trustnet/agent'):
        getAgentID(con, cursor, el.text)

    # Store the raw scenario XML per session/timestep.
    for el in tree.findall('scenario'):
        # print et.tostring(el)
        # print el
        # print '------------------------------'
        # print el.text
        # ''.join(node_text(n) for n in el.xpath('/node()'))
        try:
            cursor.execute(
                "Insert INTO scenarios (sessionID,timestep,scenario_text) values(%s,%s,%s);",
                (sessionID, timestep, et.tostring(el)))
            con.commit()
        except:
            con.rollback()

    # Trust relations: truster -> trustee with a level and optional text.
    for el in tree.findall('trustnet/trust'):
        trusterID = -1
        trustedID = -1
        level = -1
        textAboutTrust = ''
        for t in el.findall('truster'):
            trusterID = getAgentID(con, cursor, t.text)
        for t in el.findall('trustee'):
            trustedID = getAgentID(con, cursor, t.text)
        for t in el.findall('level'):
            level = t.text
        for t in el.findall('scenarioText'):
            textAboutTrust = t.text
        try:
            cursor.execute(
                "Insert INTO agent_trust (trustingAgent,trustedAgent,level,isInferred,sessionID,timestep) values(%s,%s,%s,%s,%s,%s);",
                (trusterID, trustedID, level, 0, sessionID, timestep))
            con.commit()
        except:
            con.rollback()
        try:
            cursor.execute(
                "Insert INTO agent_trust_text (trustingAgent,trustedAgent,sessionID,timestep,scenario_text) values(%s,%s,%s,%s,%s);",
                (trusterID, trustedID, sessionID, timestep, textAboutTrust))
            con.commit()
        except:
            con.rollback()

    # Belief base: plain facts and rules (conclusion + premises) per agent.
    for el in tree.findall('beliefbase/belief'):
        agentID = -1
        level = -1
        beliefID = -1
        for t in el.findall('agent'):
            agentID = getAgentID(con, cursor, t.text)
        for t in el.findall('level'):
            level = t.text
        for t in el.findall('fact'):
            isRule = 0
            isInferred = 0
            predicateHasConstant = getPredicateHasConstantID(
                con, cursor, t.text)
            if (predicateHasConstant[1] > 0):
                beliefID = getBeliefID(con, cursor, predicateHasConstant[0],
                                       predicateHasConstant[1], isRule)
                getAgentHasBeliefsID(con, cursor, agentID, beliefID, level,
                                     isInferred, sessionID, timestep)
            else:
                # Hard abort on malformed fact — a bad domain file is fatal.
                print "predicate has constant returned -1"
                sys.exit(1)
        for t in el.findall('rule'):
            isRule = 1
            isInferred = 0
            for c in t.findall('conclusion'):
                predicateHasConstant = getPredicateHasConstantID(
                    con, cursor, c.text)
                if (predicateHasConstant[1] > 0):
                    beliefID = getBeliefID(con, cursor,
                                           predicateHasConstant[0],
                                           predicateHasConstant[1], isRule)
                    getAgentHasBeliefsID(con, cursor, agentID, beliefID,
                                         level, isInferred, sessionID,
                                         timestep)
                else:
                    print "predicate has constant returned -1"
                    sys.exit(1)
            # Premises attach to the beliefID set by the conclusion above.
            for p in t.findall('premise'):
                predicateHasConstant = getPredicateHasConstantID(
                    con, cursor, p.text)
                getPremiseID(con, cursor, beliefID, predicateHasConstant[0],
                             predicateHasConstant[1])
        for t in el.findall('scenarioText'):
            getAgentBeliefsText(con, cursor, agentID, beliefID, sessionID,
                                timestep, t.text)

    # Queries: store the question, then run trust processing, belief
    # inference for all agents, and argumentation over the question.
    for el in tree.findall('query'):
        agentID = -1
        isAttack = 0
        isRule = 0
        for a in el.findall('agent'):
            agentID = getAgentID(con, cursor, a.text)
        for q in el.findall('question'):
            predicateHasConstant = getPredicateHasConstantID(
                con, cursor, q.text)
            questionID = getQuestionID(con, cursor, agentID,
                                       predicateHasConstant[0],
                                       predicateHasConstant[1], isAttack,
                                       sessionID, timestep)
            cursor.execute(
                "select agentID from questions where sessionID = %s and timestep = %s;",
                (sessionID, timestep))
            processTrust(con, cursor, sessionID, timestep, agentID)
            #print "processing trust for %s" % (agentID)
            # for agent in cursor.fetchall():
            #     inferTrusts(con,cursor,agent[0],agent[0],sessionID,timestep)
            cursor.execute("select agentID from agents;")
            for agent in cursor.fetchall():
                inferBeliefs(con, cursor, agent[0], sessionID, timestep)
            # Need to dedupe the beliefs and trusts
            #argue question
            argue(con, cursor, predicateHasConstant[1],
                  predicateHasConstant[0], agentID, questionID[0], -1,
                  sessionID, timestep)
            forwardChain(con, cursor, sessionID, timestep)
def XML_PMS2aTV(address, path, options):
    """Translate a Plex Media Server XML response into AppleTV XML.

    Selects an XML template based on (in order) the PlexConnect command in
    *options*, the request *path*, and the viewGroup of the PMS response,
    then expands the template against the PMS XML and returns the serialized
    aTV document.

    :param address: PMS address used when fetching the PMS XML
    :param path: PMS request path; '' means no PMS XML is needed
    :param options: request options dict; may carry 'PlexConnect' (command)
        and 'PlexConnectUDID' (device id, defaulted to '007')
    :return: serialized aTV XML string, or an XML_Error document on failure
    """
    cmd = ''
    if 'PlexConnect' in options:
        cmd = options['PlexConnect']
    if not 'PlexConnectUDID' in options:
        dprint(__name__, 1, "no PlexConnectUDID - pick 007")
        options['PlexConnectUDID'] = '007'
    dprint(__name__, 1, "PlexConnect Cmd: " + cmd)

    # XML Template selector
    # - PlexConnect command
    # - path
    # - PMS ViewGroup
    XMLtemplate = ''
    PMS = None
    PMSroot = None

    # XMLtemplate defined by solely PlexConnect Cmd
    if cmd == 'Play':
        XMLtemplate = 'PlayVideo.xml'
    elif cmd == 'PlayVideo_ChannelsV1':
        dprint(__name__, 1, "playing Channels XML Version 1: {0}".format(path))
        return XML_PlayVideo_ChannelsV1(
            path)  # direct link, no PMS XML available
    elif cmd == 'PhotoBrowser':
        XMLtemplate = 'Photo_Browser.xml'
    elif cmd == 'MoviePreview':
        XMLtemplate = 'MoviePreview.xml'
    elif cmd == 'MoviePrePlay':
        XMLtemplate = 'MoviePrePlay.xml'
    elif cmd == 'EpisodePrePlay':
        XMLtemplate = 'EpisodePrePlay.xml'
    elif cmd == 'ChannelPrePlay':
        XMLtemplate = 'ChannelPrePlay.xml'
    elif cmd == 'Channels':
        XMLtemplate = 'Channels.xml'
    elif cmd == 'ChannelsVideo':
        XMLtemplate = 'ChannelsVideo.xml'
    elif cmd == 'ByFolder':
        XMLtemplate = 'ByFolder.xml'
    elif cmd == 'MovieSection':
        XMLtemplate = 'MovieSection.xml'
    elif cmd == 'TVSection':
        XMLtemplate = 'TVSection.xml'
    elif cmd.find('SectionPreview') != -1:
        XMLtemplate = cmd + '.xml'
    elif cmd == 'AllMovies':
        # Movie list template depends on the per-device 'movieview' setting.
        XMLtemplate = 'Movie_' + g_ATVSettings.getSetting(
            options['PlexConnectUDID'], 'movieview') + '.xml'
    elif cmd == 'MovieSecondary':
        XMLtemplate = 'MovieSecondary.xml'
    elif cmd == 'AllShows':
        XMLtemplate = 'Show_' + g_ATVSettings.getSetting(
            options['PlexConnectUDID'], 'showview') + '.xml'
    elif cmd == 'TVSecondary':
        XMLtemplate = 'TVSecondary.xml'
    elif cmd == 'Directory':
        XMLtemplate = 'Directory.xml'
    elif cmd == 'DirectoryWithPreview':
        XMLtemplate = 'DirectoryWithPreview.xml'
    elif cmd == 'DirectoryWithPreviewActors':
        XMLtemplate = 'DirectoryWithPreviewActors.xml'
    elif cmd == 'Settings':
        XMLtemplate = 'Settings.xml'
        path = ''  # clear path - we don't need PMS-XML
    elif cmd == 'SettingsVideoOSD':
        XMLtemplate = 'Settings_VideoOSD.xml'
        path = ''  # clear path - we don't need PMS-XML
    elif cmd == 'SettingsMovies':
        XMLtemplate = 'Settings_Movies.xml'
        path = ''  # clear path - we don't need PMS-XML
    elif cmd == 'SettingsTVShows':
        XMLtemplate = 'Settings_TVShows.xml'
        path = ''  # clear path - we don't need PMS-XML
    elif cmd.startswith('SettingsToggle:'):
        # Command shape: 'SettingsToggle:<setting>+<template-basename>'
        opt = cmd[len('SettingsToggle:'):]  # cut command:
        parts = opt.split('+')
        g_ATVSettings.toggleSetting(options['PlexConnectUDID'],
                                    parts[0].lower())
        XMLtemplate = parts[1] + ".xml"
        dprint(__name__, 2, "ATVSettings->Toggle: {0} in template: {1}",
               parts[0], parts[1])
        path = ''  # clear path - we don't need PMS-XML
    elif cmd.startswith('Discover'):
        discoverPMS()
        XMLtemplate = 'Settings.xml'
        path = ''  # clear path - we don't need PMS-XML
    elif path.startswith('/search?'):
        XMLtemplate = 'Search_Results.xml'

    # determine PMS address
    PMS_list = g_param['PMS_list']
    PMS_uuid = g_ATVSettings.getSetting(options['PlexConnectUDID'],
                                        'pms_uuid')
    if not PMS_uuid in PMS_list:
        g_ATVSettings.checkSetting(options['PlexConnectUDID'],
                                   'pms_uuid')  # verify PMS_uuid
        PMS_uuid = g_ATVSettings.getSetting(options['PlexConnectUDID'],
                                            'pms_uuid')
    if PMS_uuid in PMS_list:
        g_param['Addr_PMS'] = PMS_list[PMS_uuid]['ip'] + ':' + PMS_list[
            PMS_uuid]['port']
    else:
        g_param[
            'Addr_PMS'] = '127.0.0.1:32400'  # no PMS available. Addr stupid but valid.

    # request PMS XML
    if not path == '':
        if len(PMS_list) == 0:
            # PlexGDM
            if not discoverPMS():
                return XML_Error('PlexConnect',
                                 'No Plex Media Server in Proximity')
        if not PMS_uuid in PMS_list:
            return XML_Error('PlexConnect',
                             'Selected Plex Media Server not Online')
        PMS = XML_ReadFromURL(address, path)
        # NOTE(review): `PMS == False` suggests XML_ReadFromURL returns the
        # literal False on failure — confirm against its definition.
        if PMS == False:
            return XML_Error('PlexConnect',
                             'No Response from Plex Media Server')
        PMSroot = PMS.getroot()
        dprint(__name__, 1, "viewGroup: " + PMSroot.get('ViewGroup', 'None'))

    # XMLtemplate defined by PMS XML content
    if path == '':
        pass  # nothing to load
    elif not XMLtemplate == '':
        pass  # template already selected
    elif PMSroot.get('viewGroup', '') == "secondary" and (
            PMSroot.get('art', '').find('movie') != -1
            or PMSroot.get('thumb', '').find('movie') != -1):
        XMLtemplate = 'MovieSectionTopLevel.xml'
    elif PMSroot.get('viewGroup', '') == "secondary" and (
            PMSroot.get('art', '').find('show') != -1
            or PMSroot.get('thumb', '').find('show') != -1):
        XMLtemplate = 'TVSectionTopLevel.xml'
    elif PMSroot.get('viewGroup', '') == "secondary":
        XMLtemplate = 'Directory.xml'
    elif PMSroot.get('viewGroup', '') == 'show':
        if PMSroot.get('title2') == 'By Folder':
            # By Folder View
            XMLtemplate = 'ByFolder.xml'
        else:
            # TV Show grid view
            XMLtemplate = 'Show_' + g_ATVSettings.getSetting(
                options['PlexConnectUDID'], 'showview') + '.xml'
    elif PMSroot.get('viewGroup', '') == 'season':
        # TV Season view
        XMLtemplate = 'Season_' + g_ATVSettings.getSetting(
            options['PlexConnectUDID'], 'seasonview') + '.xml'
    elif PMSroot.get('viewGroup', '') == 'movie':
        if PMSroot.get('title2') == 'By Folder':
            # By Folder View
            XMLtemplate = 'ByFolder.xml'
        else:
            # Movie listing
            XMLtemplate = 'Movie_' + g_ATVSettings.getSetting(
                options['PlexConnectUDID'], 'movieview') + '.xml'
    elif PMSroot.get('viewGroup', '') == 'track':
        XMLtemplate = 'Music_Track.xml'
    elif PMSroot.get('viewGroup', '') == 'episode':
        if PMSroot.get('title2')=='On Deck' or \
           PMSroot.get('title2')=='Recently Viewed Episodes' or \
           PMSroot.get('title2')=='Recently Aired' or \
           PMSroot.get('title2')=='Recently Added':
            # TV On Deck View
            XMLtemplate = 'TV_OnDeck.xml'
        else:
            # TV Episode view
            XMLtemplate = 'Episode.xml'
    elif PMSroot.get('viewGroup', '') == 'photo':
        # Photo listing
        XMLtemplate = 'Photo.xml'
    else:
        XMLtemplate = 'Directory.xml'

    dprint(__name__, 1, "XMLTemplate: " + XMLtemplate)

    # get XMLtemplate
    aTVTree = etree.parse(sys.path[0] + '/assets/templates/' + XMLtemplate)
    aTVroot = aTVTree.getroot()

    # convert PMS XML to aTV XML using provided XMLtemplate
    global g_CommandCollection
    g_CommandCollection = CCommandCollection(options, PMSroot, path)
    XML_ExpandTree(aTVroot, PMSroot, 'main')
    XML_ExpandAllAttrib(aTVroot, PMSroot, 'main')
    del g_CommandCollection

    dprint(__name__, 1, "====== generated aTV-XML ======")
    dprint(__name__, 1, XML_prettystring(aTVTree))
    dprint(__name__, 1, "====== aTV-XML finished ======")

    return etree.tostring(aTVroot)
def to_string(self):
    """Serialize the object into a XML string (UTF-8 encoded)."""
    tree = self.transfer_to_element_tree()
    return ElementTree.tostring(tree, encoding="UTF-8")
def create_kml(dir_path):
    """Merge per-station KML fragments under *dir_path* into one KML document.

    Builds three folders — unit-project boundaries, railway lines, and
    unit-project point positions — from the coordinate data of the individual
    input KML files, then writes a pretty-printed 'testout.kml' to *dir_path*.

    NOTE(review): identifiers and strings containing '******' look redacted
    (e.g. ``******LIST`` is not a valid Python name as shown) — this block is
    not runnable verbatim; confirm against the unredacted source.

    :param dir_path: root directory containing the input KML subdirectories
    """
    root = ET.Element('kml')
    Document = ET.SubElement(root, 'Document')
    name = ET.SubElement(Document, 'name')
    name.text = '******地图数据'
    open_ = ET.SubElement(Document, 'open')
    open_.text = '1'

    # --- Unit-project boundary polygons (单位工程范围) ---
    print('开始输出单位工程范围信息...')
    Folder_danweigongchengfanwei = ET.SubElement(Document, 'Folder')
    name = ET.SubElement(Folder_danweigongchengfanwei, 'name')
    name.text = '单位工程范围'
    files_danweigongcheng = get_all_files(
        os.path.join(dir_path, '单位工程范围'), '*.kml')
    for f in files_danweigongcheng:
        # Station name comes from the source file's basename.
        station_name = os.path.splitext(os.path.basename(f))[0]
        data = get_kml_coordinates(
            f,
            'Document/Folder/Placemark/Polygon/outerBoundaryIs/LinearRing/coordinates')
        Placemark = ET.SubElement(Folder_danweigongchengfanwei, 'Placemark')
        name = ET.SubElement(Placemark, 'name')
        name.text = station_name + '范围'
        Style = ET.SubElement(Placemark, 'Style')
        LineStyle = ET.SubElement(Style, 'LineStyle')
        color = ET.SubElement(LineStyle, 'color')
        color.text = 'ff555555'
        width = ET.SubElement(LineStyle, 'width')
        width.text = '1'
        LineString = ET.SubElement(Placemark, 'LineString')
        coordinates = ET.SubElement(LineString, 'coordinates')
        # Re-indent each coordinate line to match the output nesting depth.
        coordinates.text = '\n\t\t\t\t\t' + data.replace(
            '\n', '\n\t\t\t\t\t') + '\n\t\t\t\t\t'
        OvStyle = ET.SubElement(Placemark, 'OvStyle')
        TrackStyle = ET.SubElement(OvStyle, 'TrackStyle')
        type = ET.SubElement(TrackStyle, 'type')
        type.text = '5'
        width = ET.SubElement(TrackStyle, 'width')
        width.text = '1'

    # --- Railway line geometry (线路): one Placemark per subdirectory, ---
    # --- each holding a MultiGeometry of all its KML fragments.        ---
    print('开始输出线路信息...')
    Folder_xianlu = ET.SubElement(Document, 'Folder')
    name = ET.SubElement(Folder_xianlu, 'name')
    name.text = '****线路'
    open_ = ET.SubElement(Folder_xianlu, 'open')
    open_.text = '1'
    # line data lives in one subdirectory per line
    xianlu_dir = os.path.join(dir_path, '******线路')
    for child_name in os.listdir(xianlu_dir):
        child_path = os.path.join(xianlu_dir, child_name)
        if (os.path.isdir(child_path)):
            files_xianlu = get_all_files(child_path, '*.kml')
            Placemark = ET.SubElement(Folder_xianlu, 'Placemark')
            name = ET.SubElement(Placemark, 'name')
            name.text = child_name
            Style = ET.SubElement(Placemark, 'Style')
            LineStyle = ET.SubElement(Style, 'LineStyle')
            color = ET.SubElement(LineStyle, 'color')
            color.text = 'ffd18802'
            width = ET.SubElement(LineStyle, 'width')
            width.text = '1'
            MultiGeometry = ET.SubElement(Placemark, 'MultiGeometry')
            for f in files_xianlu:
                # line_name = os.path.splitext(os.path.basename(f))[0]
                data = get_kml_coordinates(
                    f, 'Document/Folder/Placemark/LineString/coordinates')
                LineString = ET.SubElement(MultiGeometry, 'LineString')
                coordinates = ET.SubElement(LineString, 'coordinates')
                coordinates.text = '\n\t\t\t\t\t\t' + data.replace(
                    '\n', '\n\t\t\t\t\t\t') + '\n\t\t\t\t\t\t'
            OvStyle = ET.SubElement(Placemark, 'OvStyle')
            TrackStyle = ET.SubElement(OvStyle, 'TrackStyle')
            type = ET.SubElement(TrackStyle, 'type')
            type.text = '5'
            width = ET.SubElement(TrackStyle, 'width')
            width.text = '1'

    # --- Unit-project point positions (单位工程), one Point per entry ---
    # --- paired by index with the redacted name list.                 ---
    print('开始输出单位工程位置信息...')
    Folder_danweigongcheng = ET.SubElement(Document, 'Folder')
    name = ET.SubElement(Folder_danweigongcheng, 'name')
    name.text = '单位工程'
    file = os.path.join(dir_path, '单位工程', '**经纬度.kml')
    data = get_kml_coordinates(
        file, 'Document/Folder/Placemark/LineString/coordinates')
    datalist = data.split('\n')
    # Only emit points when the coordinate count matches the name list.
    if (len(datalist) == len(******LIST)):
        print('******数量:' + str(len(datalist)))
        for i in range(len(datalist)):
            Placemark = ET.SubElement(Folder_danweigongcheng, 'Placemark')
            name = ET.SubElement(Placemark, 'name')
            name.text = ******LIST[i]
            Style = ET.SubElement(Placemark, 'Style')
            IconStyle = ET.SubElement(Style, 'IconStyle')
            color = ET.SubElement(IconStyle, 'color')
            color.text = 'ffffffff'
            scale = ET.SubElement(IconStyle, 'scale')
            scale.text = '1'
            Icon = ET.SubElement(IconStyle, 'Icon')
            href = ET.SubElement(Icon, 'href')
            href.text = 'http://maps.google.com/mapfiles/kml/shapes/bus.png'
            Point = ET.SubElement(Placemark, 'Point')
            coordinates = ET.SubElement(Point, 'coordinates')
            coordinates.text = datalist[i]

    # Pretty-print via minidom and write the merged document.
    rawText = ET.tostring(root)
    dom = MN.parseString(rawText)
    with open(os.path.join(dir_path, 'testout.kml'), 'w',
              encoding='utf-8') as f:
        dom.writexml(f, '', '\t', '\n', encoding='utf-8')