def presentation_serializer(serializer):
    filing = serializer.filing
    date = filing.date
    company = filing.company
    nsmap = gen_nsmap(filing, 'Presentation')

    maker = ElementMaker(nsmap=nsmap)
    with xml_namespace(maker, None, auto_convert=True) as maker:
        # find out about this
        linkbase = maker.linkbase(**{
            'xsi:schemaLocation': 'http://www.xbrl.org/2003/linkbase http://www.xbrl.org/2003/xbrl-linkbase-2003-12-31.xsd',
            'xmlns': 'http://www.xbrl.org/2003/linkbase',
        })

        for chart in filing.charts:
            roleRef = maker.roleRef(**{
                'roleURI': convert_role_url(chart.role, filing),
                'xlink:type': 'simple',
                'xlink:href': '{0}#{1}'.format(
                    serializer.document_name('Schema'),
                    chart.role
                )
            })
            linkbase.append(roleRef)
            chart.bind(serializer)
            linkbase.append(chart_serializer(chart, filing, maker))

    return linkbase
def _add_win_memory_size(self, dom, wnd_size):
    root = objectify.fromstring(dom.XMLDesc(0))
    val = CMDLINE_FIELD_NAME + '=' + wnd_size

    cmdline = root.find('{%s}commandline' % QEMU_NAMESPACE)
    # <qemu:commandline> doesn't exist: create the full commandline xml
    # with the required values and return.
    if cmdline is None:
        args = {'-global': val}
        root.append(etree.fromstring(get_qemucmdline_xml(args)))
        return etree.tostring(root, encoding='utf-8', pretty_print=True)

    # <qemu:commandline> exists and already has the tag
    # <qemu:arg value='-global'> (the user could already be using it for
    # something else), so we just add the missing <qemu:arg ...>.
    found = False
    for arg in cmdline.iterchildren():
        if arg.values()[0] == '-global':
            EM = ElementMaker(namespace=QEMU_NAMESPACE,
                              nsmap={'qemu': QEMU_NAMESPACE})
            cmdline.append(EM.arg(value=val))
            found = True
            break

    # <qemu:commandline> exists but there is no <qemu:arg value='-global'>,
    # so we add both missing arguments inside the existing cmdline.
    if not found:
        EM = ElementMaker(namespace=QEMU_NAMESPACE,
                          nsmap={'qemu': QEMU_NAMESPACE})
        cmdline.append(EM.arg(value='-global'))
        cmdline.append(EM.arg(value=val))

    return etree.tostring(root, encoding='utf-8', pretty_print=True)
def add_container_xml(self, rootfilename):
    """ Write container.xml

    <?xml version='1.0' encoding='UTF-8'?>
    <container xmlns='urn:oasis:names:tc:opendocument:xmlns:container'
               version='1.0'>
      <rootfiles>
        <rootfile full-path='$path'
                  media-type='application/oebps-package+xml' />
      </rootfiles>
    </container>
    """
    rootfilename = os.path.join(self.oebps_path, rootfilename)
    ns_oasis = 'urn:oasis:names:tc:opendocument:xmlns:container'

    ocf = ElementMaker(namespace=ns_oasis, nsmap={None: ns_oasis})

    container = ocf.container(
        ocf.rootfiles(
            ocf.rootfile(**{
                'full-path': rootfilename,
                'media-type': 'application/oebps-package+xml'})),
        version='1.0')

    i = self.zi()
    i.filename = 'META-INF/container.xml'
    self.writestr(i, etree.tostring(container, encoding='utf-8',
                                    xml_declaration=True, pretty_print=True))
def construct_abook(contacts):
    e = ElementMaker()
    people = []
    for contact in contacts:
        # skip contacts without an email address
        if not contact['address-list']:
            continue
        addresses = []
        for mail in contact['address-list']:
            addresses.append(e.address(alias='', remarks='', uid=get_uid(),
                                       email=mail))
        person = e.person(getattr(e, 'address-list')(*addresses),
                          getattr(e, 'attribute-list')(),
                          **{
                              'uid': get_uid(),
                              'first-name': contact['first-name'],
                              'last-name': contact['last-name'],
                              'cn': contact['cn'],
                          })
        people.append(person)
    abook = getattr(e, 'address-book')(name="Gmail", *people)
    return abook
def process_item(self, item, spider):
    # need this namespace for the creator element
    dc = ElementMaker(nsmap={'dc': "http://purl.org/dc/elements/1.1/"})

    xml = E.item(
        E.link(item['source_url']),
        E.guid(item['source_url'], is_permalink='false'),
        E.pubDate(item['time'].strftime("%a, %d %b %Y %H:%M:%S %z")),
        E.title(item['headline']),
        dc.creator(item['source_name']),
        # the categories are mostly not printed in the rss-reader,
        # which is why we prepend to body as well
        E.category(item['source_name']),
        E.category(item['place'], domain='place'),
        E.category(item['time'].strftime("%d.%m.%Y"), domain='date'),
        E.description(item['body'])
    )

    # for now we just always write the file since it might be an update
    xml_filename = item.filename('xml')
    with open(xml_filename, 'wb') as fh:
        fh.write(etree.tostring(xml, pretty_print=True))
    return item
def __init__(self, package_ref, scheme=None, username=None, password=None):
    # Convert package_ref to package URL
    url = package_ref
    parsed = urlsplit(url)
    if parsed.scheme == '':
        # Local file path.  Try to parse the file as a package reference.
        try:
            url = PackageReference.parse(parsed.path).url
        except BadReferenceError:
            # Failed.  Assume it's a package.
            url = urlunsplit(('file', '', os.path.abspath(parsed.path),
                    '', ''))

    # Load package
    self.package = Package(url, scheme=scheme, username=username,
            password=password)

    # Validate domain XML
    self.domain_xml = DomainXML(self.package.domain.data)

    # Create vmnetfs config
    e = ElementMaker(namespace=VMNETFS_NS, nsmap={None: VMNETFS_NS})
    self.vmnetfs_config = e.config()
    self.vmnetfs_config.append(_ReferencedObject('disk', self.package.disk,
            username=username, password=password).vmnetfs_config)
    if self.package.memory:
        self.vmnetfs_config.append(_ReferencedObject('memory',
                self.package.memory, username=username,
                password=password).vmnetfs_config)
def write_tmx(stream, sentence_pairs, language_a, language_b):
    """
    Writes the SentencePairs out in tmx format.
    """
    maker = ElementMaker()
    # Placeholder tokens, substituted in the serialized text afterwards.
    token = "".join(random.sample(letters * 3, 50))
    token_a = "".join(random.sample(letters * 3, 50))
    token_b = "".join(random.sample(letters * 3, 50))
    header = maker.header(srclang=language_a,
                          segtype="sentence",
                          creationtool="MTrans",
                          datatype="PlainText")
    stream.write("<?xml version=\"1.0\" ?>\n")
    stream.write("<!DOCTYPE tmx SYSTEM \"tmx14.dtd\">\n")
    stream.write("<tmx version=\"1.4\">\n")
    stream.write(etree.tostring(header, encoding="utf-8"))
    stream.write("\n<body>\n")

    for sentence_a, sentence_b in sentence_pairs:
        src_tuv = maker.tuv({token: language_a}, maker.seg(token_a))
        tgt_tuv = maker.tuv({token: language_b}, maker.seg(token_b))
        tu = maker.tu(src_tuv, tgt_tuv)
        tu_text = etree.tostring(tu, encoding="utf-8", pretty_print=True)
        tu_text = tu_text.replace(token, "xml:lang")
        if sentence_a and sentence_b:
            tu_text = tu_text.replace(token_a, sentence_a.to_text())
            tu_text = tu_text.replace(token_b, sentence_b.to_text())
        stream.write(tu_text)

    stream.write("</body>\n</tmx>")
def legend_img_from_style(self, style_name, subclass, w, h):
    with codecs.open(path.join(LAYERS_DEF_STYLES_PATH, style_name + '.xml'),
                     encoding='utf-8') as style_file:
        map_xml = style_file.read()

    E = ElementMaker()
    buf = StringIO()

    emap = etree.fromstring(map_xml)

    esymbolset = E.symbolset(resource_filename('nextgisweb_mapserver',
                                               'symbolset'))
    emap.insert(0, esymbolset)

    # PIXMAP and SVG markers
    for type_elem in emap.iterfind('./symbol/type'):
        if type_elem.text not in ('pixmap', 'svg'):
            continue
        symbol = type_elem.getparent()
        image = symbol.find('./image')
        marker = Marker.filter_by(keyname=image.text).one()
        image.text = self.env.file_storage.filename(marker.fileobj)

    # FONTS
    fonts_e = E.fontset(self.env.mapserver.settings['fontset'])
    emap.insert(0, fonts_e)

    mapf_map = Map().from_xml(emap)
    mapfile(mapf_map, buf)

    mapobj = mapscript.fromstring(buf.getvalue().encode('utf-8'))
    layerobj = mapobj.getLayer(0)
    classobj = layerobj.getClass(subclass)

    gdimg = classobj.createLegendIcon(mapobj, layerobj, w, h)
    mem_png = StringIO(gdimg.saveToString())

    img = Image.open(mem_png)
    return img
def rasterToSvg(imagearray, mask, blobs):
    # imagearray is assumed to be height x width x 3; blue:0, green:1, red:2
    E = ElementMaker(namespace=SVG_NAMESPACE,
                     nsmap={None: SVG_NAMESPACE, "xlink": XLINK_NAMESPACE})
    height, width = mask.shape
    wholeImage = E.svg(
        version="1.1",
        width="100%",
        height="100%",
        preserveAspectRatio="xMidYMin meet",
        viewBox="0 0 %d %d" % (width, height),
        style="fill: none; stroke: black; stroke-linejoin: miter; stroke-width: 2; text-anchor: middle;")

    # Based on how your Numpy arrays are structured, you may want a
    # different normalization scheme (see comments above).
    rasterAsBase64Png = rasterToPng(imagearray, normalizeMaskToAlpha(mask))
    rasterElement = E.image(**{
        XLINK_HREF: "data:image/png;base64," + rasterAsBase64Png,
        "x": "0",
        "y": "0",
        "width": repr(width),
        "height": repr(height),
        "preserveAspectRatio": "none",
    })
    wholeImage.append(rasterElement)

    # If you're writing to disk, you can lower the overhead by writing
    # directly to a file object, rather than a StringIO.
    output = StringIO.StringIO()
    output.write(SVG_FILE_HEADER)
    ElementTree(wholeImage).write(output)
    return output.getvalue()
class ModsRoot(object):

    def __init__(self, collection=False):
        self._me = ElementMaker(
            namespace=MODS_NAMESPACE,
            nsmap=MODS_NAMESPACE_MAP,
            makeelement=makeelement
        )
        if collection:
            self._root = self._me.modsCollection()
        else:
            self._root = self._me.mods()
        self._root.set(
            '{{{}}}schemaLocation'.format(XSI_NAMESPACE),
            '{} {}'.format(MODS_NAMESPACE, MODS_SCHEMA_LOC)
        )

    @property
    def etree(self):
        return self._root

    def as_xml(self, xml_declaration=False, pretty_print=True):
        return etree.tostring(
            self._root,
            xml_declaration=xml_declaration,
            pretty_print=pretty_print,
            encoding='utf-8'
        )
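A minimal usage sketch for ModsRoot, not taken from the source: it assumes the
module-level MODS_NAMESPACE and MODS_NAMESPACE_MAP constants shown above are in
scope, and the titleInfo/title element names follow the MODS schema.

# Hedged example: build a single-record MODS document and serialize it.
mods = ModsRoot()
maker = ElementMaker(namespace=MODS_NAMESPACE, nsmap=MODS_NAMESPACE_MAP)
mods.etree.append(maker.titleInfo(maker.title('Example title')))
print(mods.as_xml(xml_declaration=True))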
def test_xml_namespace_none(self):
    from lxml.builder import ElementMaker
    maker = ElementMaker(namespace='http://foo/', nsmap=self.nsmap)
    self.assertEqual(maker._namespace, '{http://foo/}')
    with xml_namespace(maker, None):
        self.assertEqual(
            etree.tostring(maker.test(), pretty_print=True),
            '<test xmlns:test="http://test/" xmlns:foo="http://foo/"'
            ' xmlns:bar="http://bar/"/>\n')
    self.assertEqual(maker._namespace, '{http://foo/}')
def full_xml(spreadsheet, s):
    """ Print the full parsed xml for the given spreadsheet filename """
    global structure
    structure = __import__(s, fromlist=[''])
    global datemode
    global E
    E = ElementMaker()
    book = xlrd.open_workbook(spreadsheet)
    datemode = book.datemode
    sheet = book.sheet_by_index(structure.structure["metadata"])
    # TODO Add handling of proper excel dates
    try:
        datestring = get_date(sheet.cell_value(
            rowx=structure.metadata["date"]["row"],
            colx=structure.metadata["date"]["col"]))
    except TypeError:
        print "Type error: could not read the date cell"
        datestring = ""
    except ValueError:
        print "Value error: are your dates properly formatted?"
        datestring = ""
    # TODO Make default language configurable
    root = lang("en", E.implementation(
        E.metadata(
            E.publisher(
                silent_value(sheet,
                             rowx=structure.metadata["name"]["row"],
                             colx=structure.metadata["name"]["col"]),
                code=silent_value(sheet,
                                  rowx=structure.metadata["code"]["row"],
                                  colx=structure.metadata["code"]["col"])
            ),
            E.version(silent_value(sheet,
                                   rowx=structure.metadata["version"]["row"],
                                   colx=structure.metadata["version"]["col"])),
            E.date(datestring),
            E.schedule_type(
                structure.identification["name"],
                code=structure.identification["code"]
            )
        ),
        parse_information(
            E.publishing(),
            book.sheet_by_index(structure.structure["publishing"]),
            structure.publishing_rows,
            # Provide the date of the implementation schedule; some answers
            # can be e.g. "already published", which means the date of the
            # schedule.
            datestring
        ),
        parse_data(
            E.organisation(),
            book.sheet_by_index(structure.structure["organisation"]),
            structure.organisation_rows
        ),
        parse_data(
            E.activity(),
            book.sheet_by_index(structure.structure["activity"]),
            structure.activity_rows
        )
    ))
    root.set('generated-datetime', datetimestamp())
    return root
def build_error_response(self, request_id, code, description=None):
    '''
    Assemble the XML for an error response payload.

    request_id  -- Request ID of the offending payload
    code        -- The HTTP error status code we want to use
    description -- An extra note on what was not acceptable

    Returns: An lxml.etree.Element object containing the payload
    '''
    oadr = ElementMaker(namespace=self.ns_map['oadr'], nsmap=self.ns_map)
    pyld = ElementMaker(namespace=self.ns_map['pyld'], nsmap=self.ns_map)
    ei = ElementMaker(namespace=self.ns_map['ei'], nsmap=self.ns_map)

    payload = oadr.oadrCreatedEvent(
        pyld.eiCreatedEvent(
            ei.eiResponse(
                ei.responseCode(code),
                # echo the request ID of the offending payload
                pyld.requestID(request_id)
            ),
            ei.venID(self.ven_id)
        )
    )
    logging.debug("Error payload:\n%s",
                  etree.tostring(payload, pretty_print=True))
    return payload
def convert_metadata(self, mi):
    E = ElementMaker(namespace=namespaces['cp'],
                     nsmap={x: namespaces[x] for x in 'cp dc dcterms xsi'.split()})
    cp = E.coreProperties(E.revision("1"), E.lastModifiedBy('calibre'))
    ts = utcnow().isoformat(str('T')).rpartition('.')[0] + 'Z'
    for x in 'created modified'.split():
        x = cp.makeelement('{%s}%s' % (namespaces['dcterms'], x),
                           **{'{%s}type' % namespaces['xsi']: 'dcterms:W3CDTF'})
        x.text = ts
        cp.append(x)
    self.mi = mi
    update_doc_props(cp, self.mi)
    return xml2str(cp)
def wrapFasta(fasta):
    DI_NS = "http://rostlab.org/disulfinder/input"
    XSI = "http://www.w3.org/2001/XMLSchema-instance"
    WSDL_MSG = "http://alex.tbl/webservice/disulfinder"
    NS_MAP = {"di": DI_NS, "xsi": XSI, "msg": WSDL_MSG}

    DI = ElementMaker(namespace=DI_NS, nsmap=NS_MAP)
    MSG = ElementMaker(namespace=WSDL_MSG, nsmap=NS_MAP)

    with open(fasta) as f:
        seq = f.read()

    mydoc = MSG.getDisulfinderRequest(DI.sequence(seq))
    return mydoc
def convert_metadata(self, oeb):
    E = ElementMaker(namespace=namespaces['cp'],
                     nsmap={x: namespaces[x] for x in 'cp dc dcterms xsi'.split()})
    cp = E.coreProperties(E.revision("1"), E.lastModifiedBy('calibre'))
    ts = utcnow().isoformat(str('T')).rpartition('.')[0] + 'Z'
    for x in 'created modified'.split():
        x = cp.makeelement('{%s}%s' % (namespaces['dcterms'], x),
                           **{'{%s}type' % namespaces['xsi']: 'dcterms:W3CDTF'})
        x.text = ts
        cp.append(x)
    package = etree.Element(OPF('package'), attrib={'version': '2.0'},
                            nsmap={None: OPF2_NS})
    oeb.metadata.to_opf2(package)
    self.mi = ReadOPF(BytesIO(xml2str(package)), populate_spine=False,
                      try_to_guess_cover=False).to_book_metadata()
    update_doc_props(cp, self.mi)
    return xml2str(cp)
def __init__(self, url):
    self.url = url

    # Generate XML
    e = ElementMaker(namespace=NS, nsmap={None: NS})
    tree = e.reference(
        e.url(self.url),
    )

    try:
        schema.assertValid(tree)
    except etree.DocumentInvalid, e:
        raise BadReferenceError(
                'Generated XML does not validate (bad URL?)', str(e))
def test_auto_convert(self):
    from lxml.builder import ElementMaker
    maker = ElementMaker(namespace='http://foo/', nsmap=self.nsmap)
    before = maker.presentationLink(**{
        '{http://test/}type': 'extended',
        '{http://foo/}role': 'fasdfa',
    })
    with auto_convert(maker) as maker:
        after = maker.presentationLink(**{
            'test:type': 'extended',
            'foo:role': 'fasdfa',
        })
    self.assertEqual(etree.tostring(before), etree.tostring(after))
def merge_xmp_packet(old, new):
    """
    Merge metadata present in the old packet that is not present in the new
    one into the new one. Assumes the new packet was generated by
    metadata_to_xmp_packet().
    """
    old, new = parse_xmp_packet(old), parse_xmp_packet(new)
    # As per the Adobe spec all metadata items have to be present inside
    # top-level rdf:Description containers
    item_xpath = XPath("//rdf:RDF/rdf:Description/*")

    # First remove all data fields that metadata_to_xmp_packet() knows about,
    # since either they will have been set or, if not present, imply they
    # have been cleared
    defined_tags = {expand(prefix + ":" + scheme)
                    for prefix in ("prism", "pdfx")
                    for scheme in KNOWN_ID_SCHEMES}
    defined_tags |= {
        expand("dc:" + x)
        for x in ("identifier", "title", "creator", "date", "description",
                  "language", "publisher", "subject")
    }
    defined_tags |= {expand("xmp:" + x) for x in ("MetadataDate", "Identifier")}
    # For redundancy also remove all fields explicitly set in the new packet
    defined_tags |= {x.tag for x in item_xpath(new)}
    calibrens = "{%s}" % NS_MAP["calibre"]
    for elem in item_xpath(old):
        if elem.tag in defined_tags or (elem.tag and elem.tag.startswith(calibrens)):
            elem.getparent().remove(elem)

    # Group all items based on their namespaces
    groups = defaultdict(list)
    for item in item_xpath(new):
        groups[item.nsmap[item.prefix]].append(item)
    for item in item_xpath(old):
        groups[item.nsmap[item.prefix]].append(item)

    A = ElementMaker(namespace=NS_MAP["x"], nsmap=nsmap("x"))
    R = ElementMaker(namespace=NS_MAP["rdf"], nsmap=nsmap("rdf"))
    root = A.xmpmeta(R.RDF)
    rdf = root[0]

    for namespace in sorted(groups, key=lambda x: {
            NS_MAP["dc"]: "a",
            NS_MAP["xmp"]: "b",
            NS_MAP["calibre"]: "c"}.get(x, "z" + x)):
        items = groups[namespace]
        desc = rdf.makeelement(expand("rdf:Description"), nsmap=find_nsmap(items))
        desc.set(expand("rdf:about"), "")
        rdf.append(desc)
        for item in items:
            clone_into(desc, item)

    return serialize_xmp_packet(root)
def report(self, file_format='html'):
    from lxml.builder import ElementMaker, E
    self.plot(filedir=self.report_dir, file_format='jpg')
    element_maker = ElementMaker(namespace=None,
                                 nsmap={None: "http://www.w3.org/1999/xhtml"})
    html = element_maker.html(
        E.head(E.title("VASP Ideal Strength")),
        E.body(E.h1("VASP Ideal Strength"),
               E.h2('Structure'),
               E.pre(str(self.structure)),
               E.h2('Ideal Strength'),
               E.p(E.img(src='strength.jpg', width="800", height="600",
                         alt="Strength"))))
    return self.report_end(html, file_format)
def report(self, file_format='html'):
    from lxml.builder import ElementMaker, E
    self.plot(figname=self.report_dir + os.sep + 'static.jpg')
    element_maker = ElementMaker(namespace=None,
                                 nsmap={None: "http://www.w3.org/1999/xhtml"})
    html = element_maker.html(
        E.head(E.title("ABINIT Static Calculation")),
        E.body(E.h1("ABINIT Static Calculation"),
               E.h2('Structure'),
               E.pre(str(self.structure)),
               E.h2('Self Consistent Field Convergence'),
               E.p(E.img(src='static.jpg', width="800", height="600",
                         alt="Static Calculation"))))
    return self.report_end(html, file_format)
def rasterAndPolygonsToSvg(imagearray, mask, polygons):
    # imagearray is assumed to be height x width x 3; blue:0, green:1, red:2
    E = ElementMaker(namespace=SVG_NAMESPACE,
                     nsmap={None: SVG_NAMESPACE, "xlink": XLINK_NAMESPACE})
    height, width = mask.shape
    wholeImage = E.svg(
        version="1.1",
        width="100%",
        height="100%",
        preserveAspectRatio="xMidYMin meet",
        viewBox="0 0 %d %d" % (width, height),
        style="fill: none; stroke: black; stroke-linejoin: miter; stroke-width: 2; text-anchor: middle;",
    )

    # Based on how your Numpy arrays are structured, you may want a
    # different normalization scheme (see comments above).
    rasterAsBase64Png = rasterToPng(imagearray, normalizeMaskToAlpha(mask))
    rasterElement = E.image(**{
        XLINK_HREF: "data:image/png;base64," + rasterAsBase64Png,
        "x": "0",
        "y": "0",
        "width": repr(width),
        "height": repr(height),
        "preserveAspectRatio": "none",
    })
    wholeImage.append(rasterElement)

    # Convert the polygons from a dictionary into a bunch of SVG paths
    # and add them to the image.
    for polygon in polygons:
        points = ["L %r %r" % (x, y) for x, y in polygon["points"]]
        if len(points) > 0:
            points[0] = "M" + points[0][1:]  # first is a moveto (M), not a lineto (L)
            points.append("Z")  # close the polygon
            polygonElement = E.path(d=" ".join(points), style=polygon["style"])
            wholeImage.append(polygonElement)

    # If you're writing to disk, you can lower the overhead by writing
    # directly to a file object, rather than a StringIO.
    output = StringIO.StringIO()
    output.write(SVG_FILE_HEADER)
    ElementTree(wholeImage).write(output)
    return output.getvalue()
def to_html(self):
    """ Return a <html:head> element with DC metadata. """
    w = _HTML_Writer()
    self.feed_to_writer(w)

    e = ElementMaker()

    head = e.head(
        e.link(rel="schema.DCTERMS", href=str(NS.dcterms)),
        e.link(rel="schema.MARCREL", href=str(NS.marcrel)),
        profile="http://dublincore.org/documents/2008/08/04/dc-html/",
        *w.metadata
    )
    return head
def __init__(self, domain_name, stock_settings, case_stub_list):
    self.domain_name = domain_name
    self.stock_settings = stock_settings
    self.case_stub_list = case_stub_list
    from lxml.builder import ElementMaker
    self.elem_maker = ElementMaker(namespace=COMMTRACK_REPORT_XMLNS)
class TestEventRegisty(unittest.TestCase):

    def setUp(self):
        self.E = ElementMaker(namespace=ns)

    def testEventRegisty(self):
        # Define a test context
        res = {"value": "FOO"}

        # Define a function used as event callback that will modify the context
        def misc(*args, **kwargs):
            res["value"] = "BAR"

        url = "wave://[email protected]/test"
        user = "******"
        event_registry = operation.EventRegisty(user=user, callback=misc)
        self.assertEqual(event_registry.user, user)

        # Create an operation for its event.
        NS = opdev.OperationNS(ns)

        @NS
        def op(event, *args, **kwargs):
            pass

        # Test that the event isn't triggered before registration
        operation.performOperation(event_registry, self.E.op(href=url))
        self.assertEqual(res["value"], "FOO")

        # Trigger the event
        event_registry.register(url, "{%s}op" % ns)
        operation.performOperation(event_registry, self.E.op(href=url))
        self.assertEqual(res["value"], "BAR")

        # Test a different URL
        res["value"] = "FOOBAR"
        operation.performOperation(event_registry,
                                   self.E.op(href="pyofwave.info/Firefly"))
        self.assertEqual(res["value"], "FOOBAR")

        # Unregister one event
        res["value"] = "BARFOO"
        event_registry.unregister(url, "{%s}op" % ns)
        operation.performOperation(event_registry, self.E.op(href=url))
        self.assertEqual(res["value"], "BARFOO")

        # Unregister all events
        event_registry.register(url, "{%s}op1" % ns)
        event_registry.register(url, "{%s}op2" % ns)
        event_registry.unregister(url, "*")
def report(self, file_format='html'):
    from lxml.builder import ElementMaker, E
    self.plot(filedir=self.report_dir, file_format='jpg')
    element_maker = ElementMaker(namespace=None,
                                 nsmap={None: "http://www.w3.org/1999/xhtml"})
    html = element_maker.html(
        E.head(E.title("ABINIT Ion Relaxation")),
        E.body(E.h1("ABINIT Ion Relaxation"),
               E.h2('Initial Structure'),
               E.pre(str(self.structure)),
               E.h2('Forces Minimization'),
               E.p(E.img(src='forces.jpg', width="800", height="600",
                         alt="Forces")),
               E.h2('Stress Minimization'),
               E.p(E.img(src='stress.jpg', width="800", height="600",
                         alt="Stress"))))
    return self.report_end(html, file_format)
def createObject(self, pid, label, state=u"A"):
    foxml = ElementMaker(namespace=NSMAP["foxml"], nsmap=NSMAP)
    foxml_state = {"A": u"Active", "I": u"Inactive", "D": u"Deleted"}[state]
    doc = foxml.digitalObject(
        foxml.objectProperties(
            foxml.property(NAME="info:fedora/fedora-system:def/model#state",
                           VALUE=foxml_state),
            foxml.property(NAME="info:fedora/fedora-system:def/model#label",
                           VALUE=label),
        ),
        VERSION="1.1",
        PID=pid,
    )
    body = etree.tostring(doc, encoding="UTF-8", xml_declaration=False)
    request = self.api.createObject(pid=pid)
    request.headers["Content-Type"] = "text/xml; charset=utf-8"
    response = request.submit(body, state=state[0], label=label)
    return self.getObject(pid)
def xmlfilter(self, name, data):
    propmaker = ElementMaker(namespace=NSMAP['apps'])
    properties = []
    for k, v in data.items():
        if v is None:
            continue
        properties.append(propmaker.property(name=k, value=v))
    return E.entry(
        E.category(term='filter'),
        E.title('Mail filter'),
        E.id(name),
        E.content(),
        *properties
    )
def writeOPF_metadata():
    E = ElementMaker(nsmap=nsmap)
    DC = ElementMaker(namespace=nsmap['dc'])
    tree = E.metadata(
        DC.identifier({'id': 'bookid'}, self.coll.coll_id),
        DC.language(self.coll.language),
        DC.title(self.coll.title or 'untitled'),
        DC.creator(self.coll.editor),
        DC.publisher(config.publisher),
    )
    return tree
import logging
import sys
from urllib import urlencode

from django.core.urlresolvers import reverse
from lxml import etree
from lxml.builder import E, ElementMaker

from main.utils import absolute_uri
from marketplace.models import Product, Country, CurrencyExchangeRate

logger = logging.getLogger(__name__)

g = "http://base.google.com/ns/1.0"
nsmap = {"g": g, None: ""}
em = ElementMaker(namespace=g, nsmap=nsmap)
defaultem = ElementMaker(namespace="", nsmap=nsmap)

# When a shipping profile specifies that the product ships worldwide we need
# to specify the top 100 countries (by number of internet users).
# This is because Google doesn't allow us to specify 'ships worldwide', and we
# must specify individual countries, but restricts us to 100.
# http://www.indexmundi.com/g/r.aspx?t=100&v=118
WORLDWIDE_SHIPPING_COUNTRIES = (
    'CN', 'US', 'JP', 'BR', 'DE', 'IN', 'GB', 'FR', 'NG', 'RU', 'KR', 'MX',
    'IT', 'ES', 'TR', 'CA', 'VN', 'CO', 'PL', 'PK', 'EG', 'ID', 'TH', 'TW',
    'AU', 'MY', 'NL', 'AR', 'MA', 'SA', 'PE', 'VE', 'SE', 'PH', 'IR', 'BE',
    'RO', 'UA', 'CL', 'CZ', 'HU', 'CH', 'AT', 'KZ', 'PT', 'GR', 'HK', 'DK',
    'DZ', 'UZ', 'IL', 'SY', 'NO', 'ZA', 'SD', 'RS', 'SK', 'KE', 'TN', 'AE',
    'NZ', 'BG', 'EC', 'SG', 'UG', 'IE', 'DO', 'BY', 'AZ', 'YE', 'GT', 'HR',
    'KG', 'LT', 'SN', 'LK', 'JO', 'JM', 'LV', 'CR', 'OM', 'ZW', 'BA', 'UY',
def monta_xml(self, urn, norma):
    publicador = LexmlPublicador.objects.first()
    if norma and publicador:
        LEXML = ElementMaker(namespace=self.ns['lexml'], nsmap=self.ns)
        oai_lexml = LEXML.LexML()

        oai_lexml.attrib['{{{}}}schemaLocation'.format(self.XSI_NS)] = \
            '{} {}'.format(
                'http://www.lexml.gov.br/oai_lexml',
                'http://projeto.lexml.gov.br/esquemas/oai_lexml.xsd')

        texto_integral = norma.texto_integral
        mime_types = {
            'doc': 'application/msword',
            'docx': 'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
            'odt': 'application/vnd.oasis.opendocument.text',
            'pdf': 'application/pdf',
            'rtf': 'application/rtf',
        }
        if texto_integral:
            url_conteudo = self.config['base_url'] + texto_integral.url
            extensao = texto_integral.url.split('.')[-1]
            formato = mime_types.get(extensao, 'application/octet-stream')
        else:
            formato = 'text/html'
            url_conteudo = self.config['base_url'] + reverse(
                'sapl.norma:normajuridica_detail',
                kwargs={'pk': norma.numero})

        element_maker = ElementMaker()
        id_publicador = str(publicador.id_publicador)

        item_conteudo = element_maker.Item(url_conteudo,
                                           formato=formato,
                                           idPublicador=id_publicador,
                                           tipo='conteudo')
        oai_lexml.append(item_conteudo)

        url = self.config['base_url'] + reverse(
            'sapl.norma:normajuridica_detail', kwargs={'pk': norma.numero})
        item_metadado = element_maker.Item(url,
                                           formato='text/html',
                                           idPublicador=id_publicador,
                                           tipo='metadado')
        oai_lexml.append(item_metadado)

        documento_individual = element_maker.DocumentoIndividual(urn)
        oai_lexml.append(documento_individual)

        if norma.tipo.equivalente_lexml == 'lei.organica':
            epigrafe = '{} de {} - {}, de {}'.format(
                norma.tipo.descricao, casa.municipio, casa.uf, norma.ano)
        elif norma.tipo.equivalente_lexml == 'constituicao':
            epigrafe = '{} do Estado de {}, de {}'.format(
                norma.tipo.descricao, casa.municipio, norma.ano)
        else:
            epigrafe = '{} n° {}, de {}'.format(
                norma.tipo.descricao, norma.numero,
                self.data_por_extenso(norma.data))
        oai_lexml.append(element_maker.Epigrafe(epigrafe))

        oai_lexml.append(element_maker.Ementa(norma.ementa))

        indexacao = norma.indexacao
        if indexacao:
            oai_lexml.append(element_maker.Indexacao(indexacao))

        return etree.tostring(oai_lexml)
    else:
        return None
Licensed under the Apache License, Version 2.0 (the "License"); you may not
use this file except in compliance with the License. You may obtain a copy of
the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""
from lxml.builder import ElementMaker

from ..utils import convert_datetime_to_utc
from ..compat import _unicode

import base64

MSG_NS = u'http://schemas.microsoft.com/exchange/services/2006/messages'
TYPE_NS = u'http://schemas.microsoft.com/exchange/services/2006/types'
SOAP_NS = u'http://schemas.xmlsoap.org/soap/envelope/'

NAMESPACES = {u'm': MSG_NS, u't': TYPE_NS, u's': SOAP_NS}

M = ElementMaker(namespace=MSG_NS, nsmap=NAMESPACES)
T = ElementMaker(namespace=TYPE_NS, nsmap=NAMESPACES)

EXCHANGE_DATETIME_FORMAT = u"%Y-%m-%dT%H:%M:%SZ"
EXCHANGE_DATE_FORMAT = u"%Y-%m-%d"

DISTINGUISHED_IDS = (
    'calendar', 'contacts', 'deleteditems', 'drafts', 'inbox', 'journal',
    'notes', 'outbox', 'sentitems',
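For context, a hedged sketch of how the M and T makers above are typically
combined into an EWS request body; this particular GetFolder call is an
illustration of the two-namespace pattern, not a request taken from this
module.

# Illustrative only: m: elements from M, t: elements from T.
get_folder = M.GetFolder(
    M.FolderShape(T.BaseShape(u'Default')),
    M.FolderIds(T.DistinguishedFolderId(Id=u'inbox')),
)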
def metadata_to_xmp_packet(mi):
    A = ElementMaker(namespace=NS_MAP['x'], nsmap=nsmap('x'))
    R = ElementMaker(namespace=NS_MAP['rdf'], nsmap=nsmap('rdf'))
    root = A.xmpmeta(R.RDF)
    rdf = root[0]

    dc = rdf.makeelement(expand('rdf:Description'), nsmap=nsmap('dc'))
    dc.set(expand('rdf:about'), '')
    rdf.append(dc)
    for prop, tag in {'title': 'dc:title',
                      'comments': 'dc:description'}.iteritems():
        val = mi.get(prop) or ''
        create_alt_property(dc, tag, val)
    for prop, (tag, ordered) in {
            'authors': ('dc:creator', True),
            'tags': ('dc:subject', False),
            'publisher': ('dc:publisher', False),
    }.iteritems():
        val = mi.get(prop) or ()
        if isinstance(val, basestring):
            val = [val]
        create_sequence_property(dc, tag, val, ordered)
    if not mi.is_null('pubdate'):
        # The Adobe spec recommends local time
        create_sequence_property(dc, 'dc:date',
                                 [isoformat(mi.pubdate, as_utc=False)])
    if not mi.is_null('languages'):
        langs = filter(None,
                       map(lambda x: lang_as_iso639_1(x) or canonicalize_lang(x),
                           mi.languages))
        if langs:
            create_sequence_property(dc, 'dc:language', langs, ordered=False)

    xmp = rdf.makeelement(expand('rdf:Description'),
                          nsmap=nsmap('xmp', 'xmpidq'))
    xmp.set(expand('rdf:about'), '')
    rdf.append(xmp)
    extra_ids = {}
    for x in ('prism', 'pdfx'):
        p = extra_ids[x] = rdf.makeelement(expand('rdf:Description'),
                                           nsmap=nsmap(x))
        p.set(expand('rdf:about'), '')
        rdf.append(p)

    identifiers = mi.get_identifiers()
    if identifiers:
        create_identifiers(xmp, identifiers)
        for scheme, val in identifiers.iteritems():
            if scheme in {'isbn', 'doi'}:
                for prefix, parent in extra_ids.iteritems():
                    ie = parent.makeelement(expand('%s:%s' % (prefix, scheme)))
                    ie.text = val
                    parent.append(ie)

    d = xmp.makeelement(expand('xmp:MetadataDate'))
    d.text = isoformat(now(), as_utc=False)
    xmp.append(d)

    calibre = rdf.makeelement(expand('rdf:Description'),
                              nsmap=nsmap('calibre', 'calibreSI', 'calibreCC'))
    calibre.set(expand('rdf:about'), '')
    rdf.append(calibre)
    if not mi.is_null('rating'):
        try:
            r = float(mi.rating)
        except (TypeError, ValueError):
            pass
        else:
            create_simple_property(calibre, 'calibre:rating', '%g' % r)
    if not mi.is_null('series'):
        create_series(calibre, mi.series, mi.series_index)
    if not mi.is_null('timestamp'):
        create_simple_property(calibre, 'calibre:timestamp',
                               isoformat(mi.timestamp, as_utc=False))
    for x in ('author_link_map', 'user_categories'):
        val = getattr(mi, x, None)
        if val:
            create_simple_property(calibre, 'calibre:' + x, dump_dict(val))
    for x in ('title_sort', 'author_sort'):
        if not mi.is_null(x):
            create_simple_property(calibre, 'calibre:' + x, getattr(mi, x))
    all_user_metadata = mi.get_all_user_metadata(True)
    if all_user_metadata:
        create_user_metadata(calibre, all_user_metadata)
    return serialize_xmp_packet(root)
>>> print lxml.etree.tostring(html, pretty_print=True)
<html>
  <head>
    <title>Hello World</title>
  </head>
  <body class="main">
    <h1>Hello World !</h1>
  </body>
</html>

"""

from lxml.builder import ElementMaker
from lxml.html import html_parser

E = ElementMaker(makeelement=html_parser.makeelement)

# elements
A = E.a                    # anchor
ABBR = E.abbr              # abbreviated form (e.g., WWW, HTTP, etc.)
ACRONYM = E.acronym
ADDRESS = E.address        # information on author
APPLET = E.applet          # Java applet (DEPRECATED)
AREA = E.area              # client-side image map area
B = E.b                    # bold text style
BASE = E.base              # document base URI
BASEFONT = E.basefont      # base font size (DEPRECATED)
BDO = E.bdo                # I18N BiDi over-ride
BIG = E.big                # large text style
BLOCKQUOTE = E.blockquote  # long quotation
BODY = E.body              # document body
__docformat__ = 'restructuredtext en'

import os, calendar, zipfile
from threading import RLock
from datetime import timedelta

from lxml import etree
from lxml.builder import ElementMaker

from calibre import force_unicode
from calibre.utils.date import parse_date, now as nowf, utcnow, tzlocal, \
        isoformat, fromordinal
from calibre.utils.recycle_bin import delete_file

NS = 'http://calibre-ebook.com/recipe_collection'
E = ElementMaker(namespace=NS, nsmap={None: NS})


def iterate_over_builtin_recipe_files():
    exclude = ['craigslist', 'toronto_sun']
    d = os.path.dirname
    base = os.path.join(d(d(d(d(d(d(os.path.abspath(__file__))))))), 'recipes')
    for f in os.listdir(base):
        fbase, ext = os.path.splitext(f)
        if ext != '.recipe' or fbase in exclude:
            continue
        f = os.path.join(base, f)
        rid = os.path.splitext(os.path.relpath(f, base).replace(os.sep, '/'))[0]
        yield rid, f
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division,
                        print_function, unicode_literals)
from builtins import *

from datetime import datetime

from lxml.builder import ElementMaker

from ferenda.elements import (CompoundElement, OrdinalElement,
                              TemporalElement, UnicodeElement, Link,
                              Paragraph, Section, SectionalElement)

E = ElementMaker(namespace="http://www.w3.org/1999/xhtml")


class Tidsbestamd(TemporalElement):

    def in_effect(self, date=None):
        if date is None:
            date = datetime.now()
        # in some cases, a para might have a 'upphor' or 'ikrafttrader'
        # attribute that is a string, not a date (typically
        # "den dag regeringen bestämmer")
        upphor = getattr(self, 'upphor', None)
        ikrafttrader = getattr(self, 'ikrafttrader', None)
        return ((isinstance(upphor, datetime) and date < upphor) or
                (isinstance(ikrafttrader, datetime) and date > ikrafttrader) or
                (isinstance(upphor, (type(None), str)) and
                 isinstance(ikrafttrader, (type(None), str))))

    def as_xhtml(self, uri=None, parent_uri=None, res=None):
#!/usr/bin/env python
'''Create, print, and save a new XML document'''
from lxml.builder import ElementMaker  # <1>
import lxml.etree as ET

FILE_NAME = 'knights.xml'
NAMESPACE_URL = 'http://www.cja-tech.com/knights'  # <2>
E = ElementMaker(  # <3>
    namespace=NAMESPACE_URL,
    nsmap={'kt': NAMESPACE_URL},
)

def main():
    '''Program entry point'''
    knight_info = get_knight_info()
    knight_root = build_tree(knight_info)
    knight_doc = ET.ElementTree(knight_root)
    write_doc(knight_doc)

def get_knight_info():
    '''Read knight data from the file'''
    info = []
    with open('DATA/knights.txt') as kn:
        for line in kn:
            flds = line[:-1].split(':')
            info.append(flds)
    return info
def __init__(self, namespaceHTMLElements=True):
    initargs = dict(makeelement=html_parser.makeelement)
    if namespaceHTMLElements:
        initargs.update(namespace=XHTML_NAMESPACE,
                        nsmap={None: XHTML_NAMESPACE})
    ElementMaker.__init__(self, **initargs)
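This __init__ evidently belongs to an ElementMaker subclass for (X)HTML; a
hedged sketch of how such a maker behaves once constructed (the class name
below is assumed, only the __init__ above comes from the source):

# Hypothetical enclosing class name; behavior sketch only.
maker = HTMLElementMaker()
div = maker.div(maker.p('hello'))
# With namespaceHTMLElements=True the tag is namespaced:
# div.tag == '{http://www.w3.org/1999/xhtml}div'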
import os
import sys
from datetime import datetime

import lx
import modo
import modo.constants as c

import cs_cry_export.utils as utils
from cs_cry_export import rc, __version__
import cs_cry_export.constants as _c

from lxml import etree
from lxml.builder import ElementMaker

# This doesn't need to be a class attribute
E = ElementMaker()


class CryDAEBuilder:
    """
    Given a parent CryExport node, compile it into a DAE and write it to an
    xml file.

        dae = CryDAEBuilder(cryexport_node)
        # compile() collects all the data and creates the main xml structure
        dae.compile()
        # write the data structure to a .dae file
        dae.write()
    """

    # the dae element
    xml = E.COLLADA()
    # the lxo file path
    scene_root = ""
def create_mets_file(aic, aips):
    """ Create AIC METS file with AIP information. """
    # Prepare constants
    nsmap = {
        'mets': ns.metsNS,
        'xlink': ns.xlinkNS,
        'xsi': ns.xsiNS,
    }
    now = timezone.now().strftime("%Y-%m-%dT%H:%M:%S")

    # Set up structure
    E = ElementMaker(namespace=ns.metsNS, nsmap=nsmap)
    mets = (
        E.mets(
            E.metsHdr(CREATEDATE=now),
            E.dmdSec(
                E.mdWrap(
                    E.xmlData(),
                    MDTYPE="DC",  # mdWrap
                ),
                ID='dmdSec_1',  # dmdSec
            ),
            E.fileSec(
                E.fileGrp(),
            ),
            E.structMap(
                E.div(
                    TYPE="Archival Information Collection",
                    DMDID="dmdSec_1",
                ),
                TYPE='logical',  # structMap
            ),
        ))
    mets.attrib['{{{ns}}}schemaLocation'.format(ns=nsmap['xsi'])] = \
        "http://www.loc.gov/METS/ http://www.loc.gov/standards/mets/version18/mets.xsd"

    # Add Dublin Core info
    xml_data = mets.find('mets:dmdSec/mets:mdWrap/mets:xmlData',
                         namespaces=ns.NSMAP)
    dublincore = archivematicaCreateMETS2.getDublinCore(
        archivematicaCreateMETS2.SIPMetadataAppliesToType, aic['uuid'])
    # Add <extent> with the number of AIPs
    extent = etree.SubElement(dublincore, ns.dctermsBNS + 'extent')
    extent.text = "{} AIPs".format(len(aips))
    xml_data.append(dublincore)

    # Add elements for each AIP
    file_grp = mets.find('mets:fileSec/mets:fileGrp', namespaces=ns.NSMAP)
    struct_div = mets.find('mets:structMap/mets:div', namespaces=ns.NSMAP)
    for aip in aips:
        file_id = '{name}-{uuid}'.format(name=aip['name'], uuid=aip['uuid'])
        etree.SubElement(file_grp, ns.metsBNS + 'file', ID=file_id)

        label = aip['label'] or aip['name']
        div = etree.SubElement(struct_div, ns.metsBNS + 'div', LABEL=label)
        etree.SubElement(div, ns.metsBNS + 'fptr', FILEID=file_id)

    print etree.tostring(mets, pretty_print=True)

    # Write out the file
    file_uuid = str(uuid.uuid4())
    basename = os.path.join('metadata', "METS.{}.xml".format(file_uuid))
    filename = os.path.join(aic['dir'], basename)
    with open(filename, 'w') as f:
        f.write(etree.tostring(mets, pretty_print=True))
    fileOperations.addFileToSIP(
        filePathRelativeToSIP='%SIPDirectory%' + basename,
        fileUUID=file_uuid,
        sipUUID=aic['uuid'],
        taskUUID=str(uuid.uuid4()),  # Unsure what should go here
        date=now,
        sourceType="aip creation",
        use='metadata')
    # To make this work with createMETS2 (for SIPs)
    databaseFunctions.insertIntoDerivations(file_uuid, file_uuid)

    # Insert the count of AIPs in the AIC into UnitVariables, so it can be
    # indexed later
    UnitVariable.objects.create(unittype="SIP",
                                unituuid=aic['uuid'],
                                variable="AIPsinAIC",
                                variablevalue=str(len(aips)))
def __init__(self, tree, path, oeb, opts, profile=None,
             extra_css='', user_css=''):
    self.oeb, self.opts = oeb, opts
    self.profile = profile
    if self.profile is None:
        # Use the default profile. This should really be using
        # opts.output_profile, but I don't want to risk changing it, as
        # doing so might well have hard to debug font size effects.
        from calibre.customize.ui import output_profiles
        for x in output_profiles():
            if x.short_name == 'default':
                self.profile = x
                break
    if self.profile is None:
        # Just in case the default profile is removed in the future :)
        self.profile = opts.output_profile
    self.logger = oeb.logger
    item = oeb.manifest.hrefs[path]
    basename = os.path.basename(path)
    cssname = os.path.splitext(basename)[0] + '.css'
    stylesheets = [html_css_stylesheet()]
    head = xpath(tree, '/h:html/h:head')
    if head:
        head = head[0]
    else:
        head = []

    # Add cssutils parsing profiles from output_profile
    for profile in self.opts.output_profile.extra_css_modules:
        cssprofiles.addProfile(profile['name'],
                               profile['props'],
                               profile['macros'])

    parser = CSSParser(fetcher=self._fetch_css_file,
                       log=logging.getLogger('calibre.css'))
    self.font_face_rules = []
    for elem in head:
        if (elem.tag == XHTML('style') and
                elem.get('type', CSS_MIME) in OEB_STYLES):
            text = elem.text if elem.text else u''
            for x in elem:
                t = getattr(x, 'text', None)
                if t:
                    text += u'\n\n' + force_unicode(t, u'utf-8')
                t = getattr(x, 'tail', None)
                if t:
                    text += u'\n\n' + force_unicode(t, u'utf-8')
            if text:
                text = XHTML_CSS_NAMESPACE + text
                text = oeb.css_preprocessor(text)
                stylesheet = parser.parseString(text, href=cssname,
                                                validate=False)
                stylesheet.namespaces['h'] = XHTML_NS
                stylesheets.append(stylesheet)
                # Make links to resources absolute, since these rules will
                # be folded into a stylesheet at the root
                replaceUrls(stylesheet, item.abshref, ignoreImportRules=True)
        elif (elem.tag == XHTML('link') and elem.get('href') and
                elem.get('rel', 'stylesheet').lower() == 'stylesheet' and
                elem.get('type', CSS_MIME).lower() in OEB_STYLES):
            href = urlnormalize(elem.attrib['href'])
            path = item.abshref(href)
            sitem = oeb.manifest.hrefs.get(path, None)
            if sitem is None:
                self.logger.warn(
                    'Stylesheet %r referenced by file %r not in manifest' %
                    (path, item.href))
                continue
            if not hasattr(sitem.data, 'cssRules'):
                self.logger.warn(
                    'Stylesheet %r referenced by file %r is not CSS' %
                    (path, item.href))
                continue
            stylesheets.append(sitem.data)
    csses = {'extra_css': extra_css, 'user_css': user_css}
    for w, x in csses.items():
        if x:
            try:
                text = XHTML_CSS_NAMESPACE + x
                stylesheet = parser.parseString(text, href=cssname,
                                                validate=False)
                stylesheet.namespaces['h'] = XHTML_NS
                stylesheets.append(stylesheet)
            except Exception:
                self.logger.exception('Failed to parse %s, ignoring.' % w)
                self.logger.debug('Bad css: ')
                self.logger.debug(x)
    rules = []
    index = 0
    self.stylesheets = set()
    self.page_rule = {}
    for stylesheet in stylesheets:
        href = stylesheet.href
        self.stylesheets.add(href)
        for rule in stylesheet.cssRules:
            rules.extend(self.flatten_rule(rule, href, index))
            index = index + 1
    rules.sort()
    self.rules = rules
    self._styles = {}
    for _, _, cssdict, text, _ in rules:
        fl = ':first-letter' in text
        if fl:
            text = text.replace(':first-letter', '')
        selector = get_css_selector(text)
        matches = selector(tree, self.logger)
        if fl:
            from lxml.builder import ElementMaker
            E = ElementMaker(namespace=XHTML_NS)
            for elem in matches:
                for x in elem.iter():
                    if x.text:
                        punctuation_chars = []
                        text = unicode(x.text)
                        while text:
                            if not unicodedata.category(text[0]).startswith('P'):
                                break
                            punctuation_chars.append(text[0])
                            text = text[1:]
                        special_text = u''.join(punctuation_chars) + \
                                (text[0] if text else u'')
                        span = E.span(special_text)
                        span.tail = text[1:]
                        x.text = None
                        x.insert(0, span)
                        self.style(span)._update_cssdict(cssdict)
                        break
        else:
            for elem in matches:
                self.style(elem)._update_cssdict(cssdict)
    for elem in xpath(tree, '//h:*[@style]'):
        self.style(elem)._apply_style_attr(url_replacer=item.abshref)
    num_pat = re.compile(r'\d+$')
    for elem in xpath(tree, '//h:img[@width or @height]'):
        style = self.style(elem)
        # Check if either height or width is not default
        is_styled = (style._style.get('width', 'auto') != 'auto' or
                     style._style.get('height', 'auto') != 'auto')
        if not is_styled:
            # Update img style dimensions using the width and height attrs
            upd = {}
            for prop in ('width', 'height'):
                val = elem.get(prop, '').strip()
                try:
                    del elem.attrib[prop]
                except KeyError:
                    pass
                if val:
                    if num_pat.match(val) is not None:
                        val += 'px'
                    upd[prop] = val
            if upd:
                style._update_cssdict(upd)
def __call__(self, tag, *children, **attrib):
    non_null_children = [c for c in children if c is not None]
    return ElementMaker.__call__(self, tag, *non_null_children, **attrib)
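A hedged sketch of how this None-filtering override might be used, assuming it
lives on an ElementMaker subclass (the class name NonNullElementMaker is
hypothetical). Plain ElementMaker raises TypeError on a None child, so the
override makes optional children easy to express inline.

# Hypothetical subclass carrying the __call__ override above.
E = NonNullElementMaker()
maybe_subtitle = None
doc = E('book', E('title', 'Example'), maybe_subtitle)  # None is dropped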
def __call__(self, element, metadata):
    data = metadata.record

    OAI_DATACITE = ElementMaker(namespace=self.ns['oai_datacite'],
                                nsmap=self.ns)
    DATACITE = ElementMaker(namespace=self.ns['datacite'])

    oai_datacite = OAI_DATACITE.oai_datacite()
    oai_datacite.attrib['{%s}schemaLocation' % XSI_NS] = '%s %s' % (
        self.ns['oai_datacite'], self.schemas['oai_datacite'])

    oai_datacite.append(OAI_DATACITE.isReferenceQuality('true'))
    oai_datacite.append(OAI_DATACITE.schemaVersion('3'))
    oai_datacite.append(OAI_DATACITE.datacentreSymbol('RepOD'))

    payload = OAI_DATACITE.payload()
    oai_datacite.append(payload)

    resource = DATACITE.resource()
    resource.attrib['{%s}schemaLocation' % XSI_NS] = '%s %s' % (
        self.ns['datacite'], self.schemas['datacite'])
    payload.append(resource)

    if data['metadata'].get('identifier.doi'):
        identifier = DATACITE.identifier(
            data['metadata'].get('identifier.doi')[0])
        identifier.set('identifierType', 'DOI')
        resource.append(identifier)

    if data['metadata'].get('creator'):
        creators = DATACITE.creators()
        resource.append(creators)
        for creator, affiliation in zip(
                data['metadata'].get('creator'),
                data['metadata'].get('creator.affiliation')):
            creatorElement = DATACITE.creator()
            creators.append(creatorElement)
            creatorElement.append(DATACITE.creatorName(creator))
            creatorElement.append(DATACITE.affiliation(affiliation))

    if data['metadata'].get('title'):
        titles = DATACITE.titles()
        resource.append(titles)
        for title in data['metadata'].get('title'):
            titles.append(DATACITE.title(title))

    if data['metadata'].get('publisher'):
        for publisher in data['metadata'].get('publisher'):
            resource.append(DATACITE.publisher(publisher))

    if data['metadata'].get('date.publication'):
        for dateAvailable in data['metadata'].get('date.publication'):
            resource.append(DATACITE.publicationYear(dateAvailable))

    if data['metadata'].get('subject'):
        subjects = DATACITE.subjects()
        resource.append(subjects)
        for subject in data['metadata'].get('subject'):
            subjects.append(DATACITE.subject(subject))

    contributors = DATACITE.contributors()
    if data['metadata'].get('contributor.funder'):
        funderElement = DATACITE.contributor()
        funderElement.set('contributorType', 'Funder')
        funderName = DATACITE.contributorName(
            data['metadata'].get('contributor.funder')[0])
        contributors.append(funderElement)
        funderElement.append(funderName)
        if (data['metadata'].get('contributor.fundingProgram') and
                data['metadata'].get('contributor.grantNumber')):
            funderId = 'info:eu-repo/grantAgreement/' + \
                data['metadata'].get('contributor.funder')[0] + '/' + \
                data['metadata'].get('contributor.fundingProgram')[0] + '/' + \
                data['metadata'].get('contributor.grantNumber')[0]
            funderIdElement = DATACITE.nameIdentifier(funderId)
            funderIdElement.set('nameIdentifierScheme', 'info')
            funderElement.append(funderIdElement)
        resource.append(contributors)

    if data['metadata'].get('description'):
        descriptions = DATACITE.descriptions()
        resource.append(descriptions)
        for description in data['metadata'].get('description'):
            if description:
                descriptions.append(DATACITE.description(description))
    if data['metadata'].get('relation.hasPart'):
        relatedIdentifiers = DATACITE.relatedIdentifiers()
        resource.append(relatedIdentifiers)
        for relatedIdentifier in data['metadata'].get('relation.hasPart'):
            relatedIdentifierElement = DATACITE.relatedIdentifier(relatedIdentifier)
            relatedIdentifierElement.set('relatedIdentifierType', 'DOI')
            relatedIdentifierElement.set('relationType', 'HasPart')
            relatedIdentifiers.append(relatedIdentifierElement)

    if data['metadata'].get('relation.isPartOf'):
        relatedIdentifiers = DATACITE.relatedIdentifiers()
        resource.append(relatedIdentifiers)
        for relatedIdentifier in data['metadata'].get('relation.isPartOf'):
            relatedIdentifierElement = DATACITE.relatedIdentifier(relatedIdentifier)
            relatedIdentifierElement.set('relatedIdentifierType', 'DOI')
            relatedIdentifierElement.set('relationType', 'IsPartOf')
            relatedIdentifiers.append(relatedIdentifierElement)

    if data['metadata'].get('type'):
        formats = DATACITE.formats()
        resource.append(formats)
        for format in data['metadata'].get('type'):
            formats.append(DATACITE.format(format))

    if (data['metadata'].get('date.available') or
            data['metadata'].get('date.modified')):
        datesElement = DATACITE.dates()
        resource.append(datesElement)
        if data['metadata'].get('date.available'):
            for dateAvailable in data['metadata'].get('date.available'):
                dateAvailableElement = DATACITE.date(dateAvailable)
                dateAvailableElement.set('dateType', 'available')
                datesElement.append(dateAvailableElement)
        if data['metadata'].get('date.modified'):
            for dateModified in data['metadata'].get('date.modified'):
                dateModifiedElement = DATACITE.date(dateModified)
                dateModifiedElement.set('dateType', 'modified')
                datesElement.append(dateModifiedElement)

    if data['metadata'].get('rights'):
        rightsListElement = DATACITE.rightsList()
        resource.append(rightsListElement)
        rightElement = DATACITE.rights(data['metadata'].get('rights')[0])
        if data['metadata'].get('rights.uri'):
            rightElement.set('rightsURI',
                             data['metadata'].get('rights.uri')[0])
        rightsListElement.append(rightElement)

    element.append(oai_datacite)
def get_element_maker(self):
    return ElementMaker(
        namespace=self._rng_namespace,
        nsmap=self._rng_namespace_map,
    )
def vmnetfs_config(self):
    # Write URL and validators into file for ease of debugging.
    # Defer creation of cache directory until needed.
    ensure_dir(self._urlpath)
    info_file = os.path.join(self._urlpath, 'info')
    if not os.path.exists(info_file):
        with open(info_file, 'w') as fh:
            fh.write(self._cache_info)

    # Return XML image element
    e = ElementMaker(namespace=VMNETFS_NS, nsmap={None: VMNETFS_NS})
    origin = e.origin(
        e.url(self.url),
        e.offset(str(self.offset)),
    )
    if self.last_modified or self.etag:
        validators = e.validators()
        if self.last_modified:
            validators.append(e('last-modified',
                    str(timegm(self.last_modified.utctimetuple()))))
        if self.etag:
            validators.append(e.etag(self.etag))
        origin.append(validators)
    if self.username and self.password:
        credentials = e.credentials(
            e.username(self.username),
            e.password(self.password),
        )
        origin.append(credentials)
    if self.cookies:
        cookies = e.cookies()
        for cookie in self.cookies:
            c = '%s=%s; Domain=%s; Path=%s' % (cookie.name, cookie.value,
                    cookie.domain, cookie.path)
            if cookie.expires:
                c += '; Expires=%s' % format_rfc1123_date(cookie.expires)
            if cookie.secure:
                c += '; Secure'
            if 'httponly' in [k.lower() for k in cookie._rest]:
                c += '; HttpOnly'
            cookies.append(e.cookie(c))
        origin.append(cookies)
    return e.image(
        e.name(self.label),
        e.size(str(self.size)),
        origin,
        e.cache(
            e.path(self.cache),
            e('chunk-size', str(self.chunk_size)),
        ),
        e.fetch(
            e.mode('stream' if self.stream else 'demand'),
        ),
    )
import logging

from lxml.builder import ElementMaker

from xades.constants import MAP_HASHLIB, NS_MAP
from xades.ns import EtsiNS
from xades.utils import dict_compare, rdns_to_map
from xmlsig.constants import TransformSha1, TransformUsageDigestMethod
from xmlsig.ns import DSigNs
from xmlsig.utils import USING_PYTHON2, create_node, get_rdns_name

if USING_PYTHON2:
    import urllib
else:
    import urllib.request as urllib

logger = logging.getLogger(__name__)

ETSI = ElementMaker(namespace=EtsiNS)
DS = ElementMaker(namespace=DSigNs)


class BasePolicy(object):
    """Policy base class created in order to define different policies.

    A mixture of base class implementations and abstract interface
    definitions. (TODO: might be separated in the future)
    """

    hash_method = None

    @property
    def identifier(self):
        raise NotImplementedError("Implement on specific subclasses")
from lxml.builder import ElementMaker

from calibre.constants import __appname__, __version__
from calibre.ebooks.BeautifulSoup import BeautifulSoup
from calibre.ebooks.chardet import xml_to_unicode
from calibre.utils.cleantext import clean_xml_chars

NCX_NS = "http://www.daisy.org/z3986/2005/ncx/"
CALIBRE_NS = "http://calibre.kovidgoyal.net/2009/metadata"
NSMAP = {
    None: NCX_NS,
    'calibre': CALIBRE_NS,
}

E = ElementMaker(namespace=NCX_NS, nsmap=NSMAP)
C = ElementMaker(namespace=CALIBRE_NS, nsmap=NSMAP)


class TOC(list):

    def __init__(self, href=None, fragment=None, text=None, parent=None,
                 play_order=0, base_path=os.getcwdu(), type='unknown',
                 author=None, description=None, toc_thumbnail=None):
        self.href = href
        self.fragment = fragment
        if not self.fragment:
            self.fragment = None
        self.text = text
        self.parent = parent
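A brief sketch of how the E and C makers above cooperate in one NCX tree:
structural elements stay in the default NCX namespace while calibre metadata
gets the calibre: prefix. The element names follow the NCX spec; the values
are made up for illustration.

# Illustrative NCX fragment mixing the two namespaces defined above.
navpoint = E.navPoint(
    E.navLabel(E.text('Chapter 1')),
    E.content(src='chapter1.html'),
    C.meta('A description', name='description'),  # serialized as calibre:meta
    id='num_1', playOrder='1',
)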
def initialize(self):
    if not self._environment_ready:
        raise ValueError('setup_environment has not been called')

    # Load package
    if self._package is None:
        source = source_open(self._url, scheme=self.scheme,
                username=self.username, password=self.password)
        package = Package(source)
    else:
        package = self._package

    # Validate domain XML
    domain_xml = DomainXML(package.domain.data)

    # Create vmnetfs config
    e = ElementMaker(namespace=VMNETFS_NS, nsmap={None: VMNETFS_NS})
    vmnetfs_config = e.config()
    vmnetfs_config.append(_Image('disk', package.disk,
            username=self.username, password=self.password).vmnetfs_config)
    if package.memory:
        image = _Image('memory', package.memory, username=self.username,
                password=self.password, stream=True)
        # Use recompressed memory image if available
        recompressed_path = image.get_recompressed_path(
                self.RECOMPRESSION_ALGORITHM)
        if os.path.exists(recompressed_path):
            # When started from vmnetx, logging isn't up yet
            gobject.idle_add(lambda:
                    _log.info('Using recompressed memory image'))
            image = _Image('memory',
                    SourceRange(source_open(filename=recompressed_path)),
                    stream=True)
        vmnetfs_config.append(image.vmnetfs_config)

    # Start vmnetfs
    self._fs = VMNetFS(vmnetfs_config)
    self._fs.start()
    log_path = os.path.join(self._fs.mountpoint, 'log')
    disk_path = os.path.join(self._fs.mountpoint, 'disk')
    disk_image_path = os.path.join(disk_path, 'image')
    if package.memory:
        memory_path = os.path.join(self._fs.mountpoint, 'memory')
        self._memory_image_path = os.path.join(memory_path, 'image')
        # Create recompressed memory image if missing
        if not os.path.exists(recompressed_path):
            _MemoryRecompressor(self, self.RECOMPRESSION_ALGORITHM,
                    self._memory_image_path, recompressed_path)
    else:
        memory_path = self._memory_image_path = None

    # Set up libvirt connection
    self._conn = libvirt.open('qemu:///session')
    cb = self._conn.domainEventRegisterAny(None,
            libvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE, self._lifecycle_event,
            None)
    self._conn_callbacks.append(cb)

    # Get emulator path
    emulator = domain_xml.detect_emulator(self._conn)

    # Create new viewer password if none existed
    if self.viewer_password is None:
        self.viewer_password = base64.urlsafe_b64encode(os.urandom(15))

    # Get execution domain XML
    self._domain_xml = domain_xml.get_for_execution(self._domain_name,
            emulator, disk_image_path, self.viewer_password).xml

    # Write domain XML to memory image
    if self._memory_image_path is not None:
        with open(self._memory_image_path, 'r+') as fh:
            hdr = LibvirtQemuMemoryHeader(fh)
            hdr.xml = self._domain_xml
            hdr.write(fh)

    # Set configuration
    self.vm_name = package.name
    self._have_memory = memory_path is not None
    self.max_mouse_rate = domain_xml.max_mouse_rate

    # Set chunk size
    path = os.path.join(disk_path, 'stats', 'chunk_size')
    with open(path) as fh:
        self.disk_chunk_size = int(fh.readline().strip())

    # Create monitors
    for name in self.STATS:
        stat = Statistic(name)
        self.disk_stats[name] = stat
        self._monitors.append(StatMonitor(stat, disk_path, name))
    self._monitors.append(ChunkMapMonitor(self.disk_chunks, disk_path))
    log_monitor = LineStreamMonitor(log_path)
    log_monitor.connect('line-emitted', self._vmnetfs_log)
    self._monitors.append(log_monitor)
    if self._have_memory:
        self._load_monitor = LoadProgressMonitor(memory_path)
        self._load_monitor.connect('progress', self._load_progress)

    # Kick off state machine after main loop starts
    self.state = self.STATE_STOPPED
    gobject.idle_add(self.emit, 'vm-stopped')
class SpaceObjectCatalog(object):

    def __init__(self, filename, baseURL):
        super(SpaceObjectCatalog, self).__init__()
        self._filename = filename
        self._baseURL = baseURL
        self._file = None
        self._open()
        self._initXML()

    def __del__(self):
        self._close()

    def add(self, space_obj, latest_vector):
        #url = self._baseURL + "/" + space_obj.filename_prefix + '.txt'
        #icon = self._baseURL + "/" + space_obj.filename_prefix + '.png'
        satellite = self._E.satellite(
            self._E.name(space_obj.name),
            self._E.category(space_obj._category),
            self._E.relatedBody(space_obj.related_body),
            self._E.stateVector(
                self._E.position(
                    x=str(latest_vector[1]),
                    y=str(latest_vector[2]),
                    z=str(latest_vector[3])),
                self._E.velocity(
                    x=str(latest_vector[4]),
                    y=str(latest_vector[5]),
                    z=str(latest_vector[6])),
                mjd=str(latest_vector[0])),
            #allvectors=url,
            #icon=icon,
        )

        mission = self._E.mission()
        if space_obj.mission_start is not None:
            mission.append(self._E.start(str(space_obj.mission_start)))
        if space_obj.mission_end is not None:
            mission.append(self._E.end(str(space_obj.mission_end)))
        if len(mission):
            satellite.append(mission)

        self._xml.append(satellite)
        print(space_obj.name + " added to object catalog.")

    def write(self):
        print("Writing catalog to file: " + self._filename)
        self._file.write(etree.tostring(self._xml, pretty_print=True,
                                        xml_declaration=True,
                                        encoding='utf-8'))

    def _initXML(self):
        self._E = ElementMaker(
            namespace="http://marble.kde.org/satellitecatalog",
            nsmap={'msc': "http://marble.kde.org/satellitecatalog"})
        self._xml = self._E.MarbleSatelliteCatalog()

    def _open(self):
        # etree.tostring(..., encoding='utf-8') returns bytes, so the file
        # must be opened in binary mode, not text mode.
        self._file = open(self._filename, 'w+b')
        self._file.truncate()

    def _close(self):
        self._file.close()
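# Hypothetical usage of the catalog above. Because _initXML() maps the
# namespace to the 'msc' prefix rather than None, every element serializes
# as <msc:...>. The space object and state vector are dummies.
class _Probe(object):
    name = 'Example Probe'
    _category = 'probe'
    related_body = 'Earth'
    mission_start = 58000.0
    mission_end = None

catalog = SpaceObjectCatalog('catalog.xml', 'http://example.org/data')
catalog.add(_Probe(), [58001.0, 7000.0, 0.0, 0.0, 0.0, 7.5, 0.0])
catalog.write()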
def close(self):
    from lxml.builder import ElementMaker
    logger.debug("%s new entries in %s", self.new_entries, self)
    if not self._new:
        logger.debug("no entries to write")
        return

    # combine existing and new entries
    all_entries = {**self._existing, **self._new}
    logger.debug("writing a total of %s entries", len(all_entries))

    # get VRT attributes
    vrt_affine, vrt_shape = raster.tiles_to_affine_shape(
        list(all_entries.keys()))
    vrt_dtype = _gdal_typename(self._output.profile()["dtype"])
    vrt_nodata = self._output.output_params["nodata"]

    # build XML
    E = ElementMaker()
    vrt = E.VRTDataset(
        E.SRS(self._tp.crs.wkt),
        E.GeoTransform(", ".join(map(str, vrt_affine.to_gdal()))),
        *[
            E.VRTRasterBand(
                E.NoDataValue(str(vrt_nodata)),
                E.ColorInterp("Gray"),
                *[
                    E.ComplexSource(
                        E.SourceFilename(
                            _tile_path(orig_path=path, for_gdal=True)
                            if path_is_remote(path)
                            else relative_path(
                                path=path,
                                base_dir=os.path.split(self.path)[0]),
                            relativeToVRT="0" if path_is_remote(path)
                            else "1"),
                        E.SourceBand(str(b_idx)),
                        E.SourceProperties(
                            RasterXSize=str(tile.shape.width),
                            RasterYSize=str(tile.shape.height),
                            DataType=vrt_dtype,
                            BlockXSize=str(self._output.profile().get(
                                "blockxsize", self._tp.tile_size)),
                            BlockYSize=str(self._output.profile().get(
                                "blockysize", self._tp.tile_size)),
                        ),
                        E.SrcRect(
                            xOff="0",
                            yOff="0",
                            xSize=str(tile.shape.width),
                            ySize=str(tile.shape.height),
                        ),
                        E.DstRect(
                            xOff=str(list(raster.bounds_to_ranges(
                                out_bounds=tile.bounds,
                                in_affine=vrt_affine,
                                in_shape=vrt_shape))[2]),
                            yOff=str(list(raster.bounds_to_ranges(
                                out_bounds=tile.bounds,
                                in_affine=vrt_affine,
                                in_shape=vrt_shape))[0]),
                            xSize=str(tile.shape.width),
                            ySize=str(tile.shape.height),
                        ),
                        E.NODATA(str(vrt_nodata)))
                    for tile, path in sorted(
                        all_entries.items(), key=operator.itemgetter(1))
                ],
                dataType=vrt_dtype,
                band=str(b_idx))
            for b_idx in range(1, self._output.profile()["count"] + 1)
        ],
        rasterXSize=str(vrt_shape.width),
        rasterYSize=str(vrt_shape.height),
    )

    # generate pretty XML and write
    xmlstr = minidom.parseString(ET.tostring(vrt)).toprettyxml(indent=" ")
    if self._bucket:
        key = "/".join(self.path.split("/")[3:])
        logger.debug("upload %s", key)
        self.bucket_resource.put_object(Key=key, Body=xmlstr)
    else:
        logger.debug("write to %s", self.path)
        with open(self.path, "w") as dst:
            dst.write(xmlstr)
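# A stripped-down sketch of the same VRT-building idea: a plain ElementMaker
# (GDAL VRTs are namespace-free XML) producing attribute-bearing elements.
# The raster geometry below is invented.
from lxml import etree
from lxml.builder import ElementMaker

_E = ElementMaker()
_vrt = _E.VRTDataset(
    _E.GeoTransform("0.0, 1.0, 0.0, 0.0, 0.0, -1.0"),
    _E.VRTRasterBand(
        _E.NoDataValue("0"),
        dataType="Byte",
        band="1",
    ),
    rasterXSize="256",
    rasterYSize="256",
)
print(etree.tostring(_vrt, pretty_print=True).decode('utf-8'))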
from eoxserver.core.util.timetools import isoformat
from eoxserver.services.opensearch.formats.base import (
    BaseFeedResultFormat, ns_opensearch, ns_dc, ns_atom, ns_media, ns_owc
)

# namespace declarations
ns_georss = NameSpace("http://www.georss.org/georss", "georss")
ns_gml = NameSpace("http://www.opengis.net/gml", "gml")

# namespace map
nsmap = NameSpaceMap(ns_georss, ns_gml, ns_opensearch, ns_dc, ns_atom,
                     ns_media, ns_owc)

# Element factories
GEORSS = ElementMaker(namespace=ns_georss.uri, nsmap=nsmap)
GML = ElementMaker(namespace=ns_gml.uri, nsmap=nsmap)
RSS = ElementMaker(typemap=typemap)


class RSSResultFormat(BaseFeedResultFormat):
    """ RSS result format. """

    mimetype = "application/rss+xml"
    name = "rss"

    def encode(self, request, collection_id, queryset, search_context):
        # prepare RSS factory with additional namespaces from search context
        namespaces = dict(nsmap)
        namespaces.update(search_context.namespaces)
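# Sketch of how factories like the ones above combine in a feed item. This
# builds standalone copies so the example runs without eoxserver; the
# "lat lon" ordering follows the GeoRSS-Simple convention.
from lxml import etree
from lxml.builder import ElementMaker

_georss_uri = "http://www.georss.org/georss"
_GEORSS = ElementMaker(namespace=_georss_uri, nsmap={'georss': _georss_uri})
_RSS = ElementMaker()  # plain RSS elements carry no namespace

_item = _RSS.item(
    _RSS.title('Example granule'),
    _GEORSS.point('45.256 -71.92'),
)
print(etree.tostring(_item, pretty_print=True).decode('utf-8'))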
def generate_atom(entries, feed_url):
    A = ElementMaker(namespace='http://www.w3.org/2005/Atom',
                     nsmap={None: "http://www.w3.org/2005/Atom"})
    entry_elements = []
    for entry in entries:
        entry_elements.append(A.entry(
            A.id(atom_id(entry=entry)),
            A.title(entry['title']),
            A.link(href="%s/%s" % (URL, entry['slug'])),
            A.updated(entry['date']['rfc3339']),
            A.content(entry['content_html'], type='html'),
        ))
    return tostring(A.feed(
        A.author(A.name(AUTHOR['name'])),
        A.id(atom_id()),
        A.title(TITLE),
        A.link(href=URL),
        A.link(href=feed_url, rel='self'),
        A.updated(entries[0]['date']['rfc3339']),
        *entry_elements), pretty_print=True)
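# Hypothetical call, assuming the module-level URL, TITLE, and AUTHOR
# constants and the atom_id() helper that generate_atom() relies on exist.
# Entry dicts need 'title', 'slug', 'content_html', and a 'date' mapping with
# an 'rfc3339' key; tostring(..., pretty_print=True) returns bytes.
_entries = [{
    'title': 'Hello world',
    'slug': 'hello-world',
    'content_html': '<p>First post.</p>',
    'date': {'rfc3339': '2024-01-01T00:00:00Z'},
}]
print(generate_atom(_entries,
                    feed_url='https://example.org/feed.atom').decode('utf-8'))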
__version__ = '4.0.0'

LOGGER = logging.getLogger('PYWPS')
LOGGER.debug('setting core variables')

PYWPS_INSTALL_DIR = os.path.dirname(os.path.abspath(__file__))

NAMESPACES = {
    'xlink': "http://www.w3.org/1999/xlink",
    'wps': "http://www.opengis.net/wps/1.0.0",
    'ows': "http://www.opengis.net/ows/1.1",
    'gml': "http://www.opengis.net/gml",
    'xsi': "http://www.w3.org/2001/XMLSchema-instance"
}

E = ElementMaker()
WPS = ElementMaker(namespace=NAMESPACES['wps'], nsmap=NAMESPACES)
OWS = ElementMaker(namespace=NAMESPACES['ows'], nsmap=NAMESPACES)

OGCTYPE = {
    'measure': 'urn:ogc:def:dataType:OGC:1.1:measure',
    'length': 'urn:ogc:def:dataType:OGC:1.1:length',
    'scale': 'urn:ogc:def:dataType:OGC:1.1:scale',
    'time': 'urn:ogc:def:dataType:OGC:1.1:time',
    'date': 'urn:ogc:def:dataType:OGC:1.1:date',
    'dateTime': 'urn:ogc:def:dataType:OGC:1.1:dateTime',
    'gridLength': 'urn:ogc:def:dataType:OGC:1.1:gridLength',
    'angle': 'urn:ogc:def:dataType:OGC:1.1:angle',
    'lengthOrAngle': 'urn:ogc:def:dataType:OGC:1.1:lengthOrAngle',
    'string': 'urn:ogc:def:dataType:OGC:1.1:string',
    'positiveInteger': 'urn:ogc:def:dataType:OGC:1.1:positiveInteger',
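# Sketch of combining the factories above: a WPS request element wrapping an
# OWS identifier, with the shared NAMESPACES map keeping prefixes consistent.
# The process name is invented.
from lxml import etree

_doc = WPS.Execute(
    OWS.Identifier('buffer'),  # invented process name
    service='WPS', version='1.0.0',
)
print(etree.tostring(_doc, pretty_print=True).decode('utf-8'))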
def dump_record(record):
    """Dump a single record."""
    # `element` (an unqualified ElementMaker) and `prefix` are closure
    # variables from the enclosing serializer method.
    rec_element = ElementMaker(
        namespace=self.MARC21_NS,
        nsmap={prefix: self.MARC21_NS}
    )
    rec = element.record()
    rec.append(element.recordPacking('xml'))
    rec.append(element.recordSchema('marcxml'))
    rec_record_data = element.recordData()
    rec_data = rec_element.record()
    # The MARC21 namespace is already bound via rec_element's nsmap; lxml
    # rejects literal 'xmlns' attributes, so it must not be set through
    # rec_data.attrib.
    rec_data.attrib['type'] = "Bibliographic"
    leader = record.get('leader')
    if leader:
        rec_data.append(element.leader(leader))
    if isinstance(record, GroupableOrderedDict):
        items = record.iteritems(with_order=False, repeated=True)
    else:
        items = iteritems(record)

    for df, subfields in items:
        # Control fields
        if len(df) == 3:
            if isinstance(subfields, string_types):
                controlfield = element.controlfield(subfields)
                controlfield.attrib['tag'] = df[0:3]
                rec_data.append(controlfield)
            elif isinstance(subfields, (list, tuple, set)):
                for subfield in subfields:
                    controlfield = element.controlfield(subfield)
                    controlfield.attrib['tag'] = df[0:3]
                    rec_data.append(controlfield)
        else:
            # Skip leader.
            if df == 'leader':
                continue
            if not isinstance(subfields, (list, tuple, set)):
                subfields = (subfields,)
            df = df.replace('_', ' ')
            for subfield in subfields:
                if not isinstance(subfield, (list, tuple, set)):
                    subfield = [subfield]
                for s in subfield:
                    datafield = element.datafield()
                    datafield.attrib['tag'] = df[0:3]
                    datafield.attrib['ind1'] = df[3]
                    datafield.attrib['ind2'] = df[4]
                    if isinstance(s, GroupableOrderedDict):
                        sub_items = s.iteritems(with_order=False,
                                                repeated=True)
                    elif isinstance(s, dict):
                        sub_items = iteritems(s)
                    else:
                        datafield.append(element.subfield(s))
                        sub_items = tuple()
                    for code, value in sub_items:
                        if not isinstance(value, string_types):
                            if value:
                                for v in value:
                                    datafield.append(
                                        element.subfield(v, code=code))
                        else:
                            datafield.append(element.subfield(
                                value, code=code))
                    rec_data.append(datafield)
    rec_record_data.append(rec_data)
    rec.append(rec_record_data)
    return rec
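# Standalone sketch of the MARCXML shapes dump_record() emits, using the
# standard MARCXML namespace bound to the default prefix; the record content
# is invented.
from lxml import etree
from lxml.builder import ElementMaker

_MARC_NS = 'http://www.loc.gov/MARC21/slim'
_M = ElementMaker(namespace=_MARC_NS, nsmap={None: _MARC_NS})
_rec = _M.record(
    _M.leader('00000nam a2200000 a 4500'),
    _M.controlfield('123456', tag='001'),
    _M.datafield(
        _M.subfield('An example title', code='a'),
        tag='245', ind1='0', ind2='0',
    ),
    type='Bibliographic',
)
print(etree.tostring(_rec, pretty_print=True).decode('utf-8'))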
import unittest

from chameleon.i18n import i18nize
from lxml.builder import ElementMaker

E = ElementMaker(namespace=i18nize.NSMAP["xhtml"],
                 nsmap={None: i18nize.NSMAP["xhtml"]})


class HasTextTests(unittest.TestCase):

    def testNone(self):
        self.assertEqual(i18nize.hasText(None), False)

    def testWhitespace(self):
        self.assertEqual(i18nize.hasText(" \t\n"), False)

    def testHtmlComment(self):
        self.assertEqual(i18nize.hasText("<!-- this is not relevant -->"),
                         False)

    def testExpansion(self):
        self.assertEqual(i18nize.hasText("${myprecious}"), False)

    def testPlainText(self):
        self.assertEqual(i18nize.hasText("myprecious"), True)

    def testTextAndExpansion(self):
        self.assertEqual(i18nize.hasText("myprecious ${ring}"), True)


class MustTranslateTests(unittest.TestCase):

    def testEmptyElement(self):
def format_tag_string(tags, sep, joinval=', '):
    if tags:
        tlist = tags if sep is None else [t.strip() for t in tags.split(sep)]
    else:
        tlist = []
    tlist.sort(key=sort_key)
    return joinval.join(tlist) if tlist else ''

# Vocabulary for building OPDS feeds {{{

DC_NS = 'http://purl.org/dc/terms/'
E = ElementMaker(namespace='http://www.w3.org/2005/Atom', nsmap={
    None: 'http://www.w3.org/2005/Atom',
    'dc': DC_NS,
    'opds': 'http://opds-spec.org/2010/catalog',
})

FEED = E.feed
TITLE = E.title
ID = E.id
ICON = E.icon


def UPDATED(dt, *args, **kwargs):
    return E.updated(as_utc(dt).strftime('%Y-%m-%dT%H:%M:%S+00:00'),
                     *args, **kwargs)


LINK = partial(E.link, type='application/atom+xml')
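# Sketch of assembling a minimal feed from the vocabulary above; the id and
# hrefs are placeholders, and as_utc() (used by UPDATED) is assumed to be in
# scope from calibre's date utilities.
from datetime import datetime
from lxml import etree

_feed = FEED(
    TITLE('calibre library'),
    ID('urn:uuid:00000000-0000-0000-0000-000000000000'),
    UPDATED(datetime(2024, 1, 1)),
    LINK(href='/opds', rel='self'),
)
print(etree.tostring(_feed, pretty_print=True).decode('utf-8'))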
def serialize_search(self, pid_fetcher, search_result, links=None,
                     item_links_factory=None, **kwargs):
    """Serialize a search result.

    :param pid_fetcher: Persistent identifier fetcher.
    :param search_result: Elasticsearch search result.
    :param links: Dictionary of links to add to response.
    """
    total = search_result['hits']['total']['value']
    sru = search_result['hits'].get('sru', {})
    start_record = sru.get('start_record', 0)
    maximum_records = sru.get('maximum_records', 0)
    query = sru.get('query')
    query_es = sru.get('query_es')
    next_record = start_record + maximum_records + 1
    element = ElementMaker()
    xml_root = element.searchRetrieveResponse()
    if sru:
        xml_root.append(element.version('1.1'))
    xml_root.append(element.numberOfRecords(str(total)))
    xml_records = element.records()

    language = request.args.get('ln', DEFAULT_LANGUAGE)
    for hit in search_result['hits']['hits']:
        record = hit['_source']
        pid = record['pid']
        record = self.transform_search_hit(
            pid=pid,
            record=record,
            links_factory=item_links_factory,
            language=language,
            **kwargs
        )
        element_record = simpledc.dump_etree(
            record,
            container=self.container_element,
            nsmap=self.namespace,
            attribs=self.container_attribs
        )
        xml_records.append(element_record)
    xml_root.append(xml_records)

    if sru:
        echoed_search_rr = element.echoedSearchRetrieveRequest()
        if query:
            echoed_search_rr.append(element.query(query))
        if query_es:
            echoed_search_rr.append(element.query_es(query_es))
        if start_record:
            echoed_search_rr.append(element.startRecord(str(start_record)))
        if 1 < next_record < total:
            echoed_search_rr.append(
                element.nextRecordPosition(str(next_record)))
        if maximum_records:
            echoed_search_rr.append(element.maximumRecords(
                str(maximum_records)))
        echoed_search_rr.append(element.recordPacking('XML'))
        xml_root.append(echoed_search_rr)
    # Maybe needed if we use this serialiser directly with documents
    # else:
    #     xml_links = element.links()
    #     self_link = links.get('self')
    #     if self_link:
    #         xml_links.append(element.self(f'{self_link}&format=dc'))
    #     next_link = links.get('next')
    #     if next_link:
    #         xml_links.append(element.next(f'{next_link}&format=dc'))
    #     xml_root.append(xml_links)
    return etree.tostring(xml_root, encoding='utf-8', method='xml',
                          pretty_print=True)
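# Skeleton of the SRU 1.1 envelope serialize_search() assembles, shown
# standalone with dummy values. Note the unqualified ElementMaker: as in the
# method above, the SRU elements are emitted without a namespace.
from lxml import etree
from lxml.builder import ElementMaker

_element = ElementMaker()
_root = _element.searchRetrieveResponse(
    _element.version('1.1'),
    _element.numberOfRecords('1'),
    _element.records(),  # record payloads are appended here
)
print(etree.tostring(_root, pretty_print=True).decode('utf-8'))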