def export(request):
    """Export valid dictionary entries as an XML document.

    Starts from GET parameter ``start`` (inclusive, ordered by word) when
    given and emits at most EXPORT_CHUNK_SIZE entries.
    """
    start = request.GET.get('start')
    response = HttpResponse(mimetype='application/xml')
    query = Entry.all()
    if start:
        query.filter('word >=', start)
    query.filter('valid =', True)
    query.order('word')
    entries = query.fetch(EXPORT_CHUNK_SIZE)
    response.write('<exported-data>\n')
    for entry in entries:
        if not entry.valid:
            continue
        response.write('<Entry>\n')
        # BUG FIX: word/pos/stem/etym and editor usernames were written
        # unescaped (only props/comment were escaped), producing invalid
        # XML whenever a value contains &, < or >.
        response.write('<word>%s</word>\n' % xml_escape(entry.word))
        response.write('<pos>%s</pos>\n' % xml_escape(entry.pos))
        if entry.props:
            response.write('<props>%s</props>\n' % xml_escape(entry.props))
        if entry.stem:
            response.write('<stem>%s</stem>\n' % xml_escape(entry.stem))
        if entry.etym:
            response.write('<etym>%s</etym>\n' % xml_escape(entry.etym))
        if entry.comment:
            response.write('<comment>%s</comment>\n' % xml_escape(entry.comment))
        response.write('<editors>')
        for ekey in entry.editors:
            # NOTE: one datastore get per editor key (N+1); acceptable for
            # an export-sized batch.
            editor = db.get(ekey)
            response.write('<name>%s</name>' % xml_escape(editor.username))
        response.write('</editors>\n')
        response.write('<editor>%s</editor>\n' % xml_escape(entry.editor.username))
        response.write('<date>%s</date>\n' % entry.date.strftime('%Y-%m-%d %H:%M:%S'))
        response.write('</Entry>\n')
    response.write('</exported-data>\n')
    return response
def renderFailure(err, ticket, request):
    """Render a CAS authenticationFailure XML document for a bad ticket.

    Traps the expected validation failures, sets a 403 status, and maps
    the failure type to a CAS error code/message.
    """
    log_cas_event("Failed to validate ticket.", [
        ('client_ip', request.getClientIP()),
        ('ticket', ticket)])
    # NOTE(review): InvalidTicketSpec is checked below but not trapped here;
    # presumably it subclasses one of the trapped types -- confirm.
    err.trap(InvalidTicket, InvalidProxyCallback, InvalidService)
    request.setResponseCode(403)
    code = "INVALID_TICKET"
    msg = "Validation failed for ticket '%s'." % ticket
    if err.check(InvalidTicketSpec):
        code = "INVALID_TICKET_SPEC"
    elif err.check(InvalidProxyCallback):
        code = "INVALID_PROXY_CALLBACK"
        msg = "Invalid proxy callback."
    elif err.check(InvalidService):
        code = "INVALID_SERVICE"
        msg = "Invalid service."
    # BUG FIX: every message above already ends with a period, but the
    # template appended another one ("%(msg)s."), yielding "...ticket 'x'..".
    doc_fail = dedent("""\
        <cas:serviceResponse xmlns:cas="http://www.yale.edu/tp/cas">
            <cas:authenticationFailure code="%(code)s">
                %(msg)s
            </cas:authenticationFailure>
        </cas:serviceResponse>
        """) % {
        'code': xml_escape(code),
        'msg': xml_escape(msg),
    }
    return doc_fail
def _process_report(self): while True: result = (yield) d = {k: getattr(result, k, '') for k in ('category', 'package', 'version')} d['class'] = xml_escape(result.name) d['msg'] = xml_escape(result.desc) self.out.write(self.scope_map.get(result.scope, self.result_template) % d)
def to_text_html_resources(self, retval, raw):
    """Add the resources details to the HTML output.

    :param retval: The list of strings which is used to collect the HTML
        response.
    :type retval: list
    :param raw: The original return value of this resources :func:`GET`
        method.
    :type raw: Dictionary
    """
    # NOTE: dict.iteritems() below means this module targets Python 2.
    for resource in raw:
        retval.append('<div class="resource_details">')
        # The heading doubles as an anchor target (id == visible name).
        # NOTE(review): xml_escape does not escape quotes, so a name
        # containing '"' would break the id attribute -- confirm inputs.
        retval.append('<h2 id="{0}">{0}</h2>'.format(
            xml_escape(resource['name'])))
        if resource['desc']:
            retval.append('<p class="desc">{0}</p>'.format(xml_escape(resource['desc'])))
        retval.append('<table class="config">')
        retval.append('<tr><th>Path</th><td>{0}</td>'.format(xml_escape(
            resource['path'])))
        # Build "Representation (.ext)" labels from the extension map.
        representations = [value + ' (.' + key + ')' for key, value in
                           resource['properties']['EXTENSION_MAP'].iteritems()]
        retval.append('<tr><th>Representations</th><td>{0}</td>'.format(
            xml_escape(', '.join(representations))))
        retval.append('</table>')
        # Delegate per-method documentation to the sibling helper.
        self.to_text_html_methods(retval, resource)
        retval.append('</div>')
def failureResult(err, targetService, pgt, request):
    """Render a CAS proxyFailure XML document for a failed /proxy request.

    Maps the failure type to a CAS error code; unexpected failure types
    are logged and reported as INTERNAL_ERROR.
    """
    log_cas_event("Failed to issue proxy ticket", [
        ('client_ip', request.getClientIP()),
        ('targetService', targetService),
        ('PGT', pgt),])
    if not err.check(InvalidTicket, InvalidService):
        # Unexpected failure type: log it and fall back to a generic error.
        log.err(err)
        code = "INTERNAL_ERROR"
        # BUG FIX: user-facing message misspelled "occured".
        msg = "An internal error occurred."
    if err.check(InvalidTicket):
        code = "BAD_PGT"
        msg = "PGT '%s' is invalid." % pgt
    elif err.check(InvalidService):
        code = "INVALID_SERVICE"
        msg = "Target service is not authorized."
    return dedent("""\
        <cas:serviceResponse xmlns:cas="http://www.yale.edu/tp/cas">
            <cas:proxyFailure code="%(code)s">
                %(msg)s
            </cas:proxyFailure>
        </cas:serviceResponse>
        """) % {
        'code': xml_escape(code),
        'msg': xml_escape(msg),
    }
def send_error(self, status, message=None, code='', resource='', extra_headers=None):
    """Send an S3-style XML error response.

    :param status: HTTP status code (int).
    :param message: human-readable message; defaults to the handler's
        standard reason phrase for *status*.
    :param code: S3 error code string placed in the XML body.
    :param resource: resource path placed in the XML body.
    :param extra_headers: optional mapping of additional response headers.
    """
    if not message:
        try:
            (_, message) = self.responses[status]
        except KeyError:
            message = 'Unknown'
    self.log_error("code %d, message %s", status, message)
    content = (ERROR_RESPONSE_TEMPLATE % {
        # BUG FIX: 'code' was interpolated unescaped while message and
        # resource were escaped; escape it too for well-formed XML.
        'code': xml_escape(code),
        'message': xml_escape(message),
        'request_id': 42,
        'resource': xml_escape(resource)
    }).encode('utf-8', 'replace')
    self.send_response(status, message)
    self.send_header("Content-Type", 'text/xml; charset="utf-8"')
    self.send_header("Content-Length", str(len(content)))
    if extra_headers:
        for (name, value) in extra_headers.items():
            self.send_header(name, value)
    self.end_headers()
    # No body for HEAD requests, 1xx, 204 or 304 responses.
    if self.command != 'HEAD' and status >= 200 and status not in (204, 304):
        self.wfile.write(content)
def saveXML(self):
    """Persist this CI slot's CAID/service assignments to self.filename
    as XML; on any failure the (possibly partial) file is removed.
    """
    try:
        fp = file(self.filename, 'w')  # Python 2 built-in open
        fp.write("<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n")
        fp.write("<ci>\n")
        fp.write("\t<slot>\n")
        fp.write("\t\t<id>%s</id>\n" % self.ci_slot)
        for item in self.selectedcaid:
            if len(self.selectedcaid):
                # NOTE(review): this guard is always true inside the loop.
                fp.write("\t\t<caid id=\"%s\" />\n" % item[0])
        for item in self.servicelist:
            if len(self.servicelist):
                psname = xml_escape(item[0])
                psattr = xml_escape(item[3])
                # item[2] == 1 marks a provider entry, otherwise a service.
                if item[2] == 1:
                    fp.write(
                        "\t\t<provider name=\"%s\" dvbnamespace=\"%s\" />\n" % (psname, psattr))
                else:
                    fp.write("\t\t<service name=\"%s\" ref=\"%s\" />\n" % (psname, psattr))
        fp.write("\t</slot>\n")
        fp.write("</ci>\n")
        fp.flush()
        fsync(fp.fileno())  # force data to disk before close
        fp.close()
    except:
        # Best-effort: report and drop the partial file on any failure.
        print "[CI_Config_CI%d] xml not written" % self.ci_slot
        os_unlink(self.filename)
def _notifyServicesSLO(self, services):
    """POST a SAML single-logout (SLO) notification to each service.

    :param services: mapping of service URL -> service ticket (Python 2
        dict, iterated with iteritems()).
    :returns: DeferredList of the POST results (errors consumed).
    """
    template = self._samlLogoutTemplate

    def logerr(err, service):
        # Log the failure (and any wrapped sub-failures), then return the
        # failure so the DeferredList still records it.
        log.msg("Error sending SLO to service '%s'." % service)
        log.err(err)
        errs = unwrap_failures(err)
        for error in errs:
            log.err(error)
        return err

    dlist = []
    for service, st in services.iteritems():
        dt = datetime.datetime.utcnow()
        issue_instant = dt.strftime("%Y-%m-%dT%H:%M:%S")
        identifier = str(uuid.uuid4())
        # Fill the SAML LogoutRequest template; values are XML-escaped.
        data = template % {
            'identifier': xml_escape(identifier),
            'issue_instant': xml_escape(issue_instant),
            'service_ticket': xml_escape(st)
        }
        httpClient = self.httpClientFactory(self.reactor)
        d = httpClient.post(
            service.encode('utf-8'),
            headers=Headers({'Content-Type': ['application/xml']}),
            data=data.encode('utf-8'),
            timeout=30).addCallback(treq.content).addErrback(
                logerr, service)
        dlist.append(d)
    return defer.DeferredList(dlist, consumeErrors=True)
def upload_results(self, results, path, message=None):
    """Upload query results as a CSV to Amazon S3 and build a download link.

    :param tuple results: query results to upload
    :param str path: bucket path
    :param str message: link text shown before the download link
    :return: download-link markup on success, failure message otherwise
    """
    timer = statsd.timer('bux_sql_grader.upload_results').start()
    if not message:
        message = "Download full results"

    # Serialize the result rows and push the CSV to S3.
    csv_data = self.csv_results(results)
    s3_url = self.upload_to_s3(csv_data, path)

    if not s3_url:
        link_markup = UPLOAD_FAILED_MESSAGE
    else:
        link_markup = DOWNLOAD_LINK.substitute({
            "url": xml_escape(s3_url),
            "message": xml_escape(message),
            "icon_src": xml_escape(self.download_icon),
        })

    timer.stop()
    return link_markup
def GET(self, env, start_response):
    """Handle GET Service request: translate a Swift account listing into
    an S3 ListAllMyBucketsResult document."""
    # Ask the proxy for the account's container listing in JSON.
    env['QUERY_STRING'] = 'format=json'
    body_iter = self._app_call(env)
    status = self._get_status_int()
    if status != HTTP_OK:
        if status in (HTTP_UNAUTHORIZED, HTTP_FORBIDDEN):
            return get_err_response('AccessDenied')
        else:
            return get_err_response('InvalidURI')
    containers = loads(''.join(list(body_iter)))
    # We don't keep the creation time of a bucket (s3cmd doesn't
    # work without that) so we use something bogus.
    if containers:
        owner = containers[0].get('owner', '')
    else:
        owner = ''
    body = '<?xml version="1.0" encoding="UTF-8"?>' \
        '<ListAllMyBucketsResult ' \
        'xmlns="http://doc.s3.amazonaws.com/2006-03-01">'\
        '<Owner><ID>%s</ID><DisplayName>%s</DisplayName></Owner>'\
        '<Buckets>%s</Buckets>' \
        '</ListAllMyBucketsResult>' \
        % (xml_escape(owner), xml_escape(owner),
           "".join(['<Bucket><Name>%s</Name><CreationDate>'
                    '2009-02-03T16:45:09.000Z</CreationDate></Bucket>'
                    % xml_escape(i['name']) for i in containers]))
    resp = Response(status=HTTP_OK, content_type='application/xml', body=body)
    return resp
def create_storage_pool(conn, path, pool_name='default'): """Create directory pool.""" # FIXME: support other types than dir xml = ''' <pool type="dir"> <name>%(pool)s</name> <target> <path>%(path)s</path> </target> </pool> ''' % { 'pool': xml_escape(pool_name), 'path': xml_escape(path), } try: pool = conn.storagePoolDefineXML(xml, 0) pool.setAutostart(True) pool.create(0) except libvirt.libvirtError as ex: logger.error(ex) raise StorageError( _('Error creating storage pool "%(pool)s": %(error)s'), pool=pool_name, error=ex.get_error_message(), )
def saveXML(self):
    """Write this CI slot's CAID/service selections to self.filename as
    XML; the partial file is deleted if anything goes wrong.
    """
    try:
        fp = file(self.filename, 'w')  # Python 2 built-in open
        fp.write("<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n")
        fp.write("<ci>\n")
        fp.write("\t<slot>\n")
        fp.write("\t\t<id>%s</id>\n" % self.ci_slot)
        for item in self.selectedcaid:
            if len(self.selectedcaid):
                # NOTE(review): this guard is always true inside the loop.
                fp.write("\t\t<caid id=\"%s\" />\n" % item[0])
        for item in self.servicelist:
            if len(self.servicelist):
                psname = xml_escape(item[0])
                psattr = xml_escape(item[3])
                # item[2] == 1 marks a provider entry, otherwise a service.
                if item[2]==1:
                    fp.write("\t\t<provider name=\"%s\" dvbnamespace=\"%s\" />\n" % (psname, psattr))
                else:
                    fp.write("\t\t<service name=\"%s\" ref=\"%s\" />\n" % (psname, psattr))
        fp.write("\t</slot>\n")
        fp.write("</ci>\n")
        fp.flush()
        fsync(fp.fileno())  # flush to disk before closing
        fp.close()
    except:
        # Best-effort: report and remove the partial file.
        print "[CI_Config_CI%d] xml not written" %self.ci_slot
        os_unlink(self.filename)
def __init__(self, app, name, hotkey):
    """Set up one player pane: load the UI, label it, bind the stop
    hotkey, and attach it to the application.
    """
    self.app = app
    self.name = name
    self.hotkey = hotkey
    # configuration values
    self.examination_time = self.app.conf["examination_time"]
    # remove window from container so it can be re-parented by the app
    builder = self.app.create_builder()
    self.window = builder.get_object("player_window")
    container = builder.get_object("player_window_container")
    container.remove(self.window)
    # set player name (markup template; name is XML-escaped)
    builder.get_object("player_name").set_markup(self.player_markup % xml_escape(name))
    # attach to the application
    self.app.attach(self)
    # show the hotkey's human-readable name on the stop button
    builder.get_object("stop_button_label").set_markup(self.button_markup % xml_escape(button_name(self.hotkey)))
    # cache object refs
    self.time_label = builder.get_object("time_label")
    self.reset()
    builder.connect_signals(self)
    self.window.show()
def _notifyServicesSLO(self, services):
    """POST a SAML single-logout (SLO) request to every service.

    :param services: mapping of service URL -> service ticket (Python 2
        dict, iterated with iteritems()).
    :returns: DeferredList over all POSTs (errors consumed).
    """
    template = self._samlLogoutTemplate

    def logerr(err, service):
        # Log the failure plus any wrapped sub-failures; propagate it so
        # the DeferredList records the error.
        log.msg("Error sending SLO to service '%s'." % service)
        log.err(err)
        errs = unwrap_failures(err)
        for error in errs:
            log.err(error)
        return err

    dlist = []
    for service, st in services.iteritems():
        dt = datetime.datetime.utcnow()
        issue_instant = dt.strftime("%Y-%m-%dT%H:%M:%S")
        identifier = str(uuid.uuid4())
        # Substitute XML-escaped values into the LogoutRequest template.
        data = template % {
            'identifier': xml_escape(identifier),
            'issue_instant': xml_escape(issue_instant),
            'service_ticket': xml_escape(st)
        }
        httpClient = self.httpClientFactory(self.reactor)
        d = httpClient.post(
            service.encode('utf-8'),
            headers=Headers({'Content-Type': ['application/xml']}),
            data=data.encode('utf-8'),
            timeout=30).addCallback(
                treq.content).addErrback(
                logerr, service)
        dlist.append(d)
    return defer.DeferredList(dlist, consumeErrors=True)
def iter_doc_annotations_xml_text(
        doc_annotations: List[Tuple[str, str]]) -> Iterable[str]:
    """Render (token_text, token_label) pairs as XML-escaped annotation text.

    Tokens are grouped into entity chunks; labeled chunks are wrapped in
    their tag, unlabeled chunks are emitted as plain escaped text.

    NOTE(review): despite the name and the ``Iterable[str]`` annotation,
    this returns one joined ``str`` (itself an iterable of characters)
    rather than yielding lines -- confirm callers expect the joined form.
    """
    LOGGER.debug('doc_annotations: %s', doc_annotations)
    text_tokens = [token_text for token_text, _ in doc_annotations]
    token_labels = [token_label for _, token_label in doc_annotations]
    # Entity chunks plus filler spans for the untagged gaps between them.
    entity_chunks = list(iter_add_untagged_token_spans(
        get_entities(token_labels),
        len(token_labels)
    ))
    LOGGER.debug('text_tokens: %s', text_tokens)
    LOGGER.debug('token_labels: %s', token_labels)
    LOGGER.debug('entity_chunks: %s', entity_chunks)
    return '\n'.join((
        (
            '    <{tag}>{text}</{tag}>'.format(
                tag=get_xml_tag_for_annotation_label(chunk_type),
                text=xml_escape(' '.join(text_tokens[chunk_start:chunk_end + 1]))
            )
            if chunk_type
            else '    {text}'.format(
                text=xml_escape(' '.join(text_tokens[chunk_start:chunk_end + 1]))
            )
        )
        for chunk_type, chunk_start, chunk_end in entity_chunks
    )) + '\n'
def create_xml(self):
    """Build the WebEx ``<attendees>`` XML payload for this attendee.

    Optional name parts are omitted when empty; text values are
    XML-escaped before interpolation.
    """
    template = (u'<attendees><person>%(first)s%(last)s%(email)s</person>'
                u'%(session_key)s<joinStatus>ACCEPT</joinStatus>'
                u'<emailInvitations>TRUE</emailInvitations></attendees>')
    parts = {
        'first': ('<firstName>%s</firstName>' % xml_escape(self.first_name)
                  if self.first_name else ''),
        'last': ('<lastName>%s</lastName>' % xml_escape(self.last_name)
                 if self.last_name else ''),
        'email': '<email>%s</email>' % xml_escape(self.email),
        'session_key': '<sessionKey>%s</sessionKey>' % self.event.session_key,
    }
    return template % parts
def create_xml(cfg):
    """Serialize a config dict into Android-shared-preferences-style XML.

    Keys and values are XML-escaped; one <string> element per entry.
    """
    lines = ["<?xml version='1.0' encoding='utf-8' standalone='yes' ?>\n",
             "<map>\n"]
    for key, value in cfg.items():
        lines.append(" <string name=\"%s\">%s</string>\n"
                     % (xml_escape(key), xml_escape(value)))
    lines.append("</map>\n")
    return ''.join(lines)
def _delete_multi(self, keys, force=False):
    """Delete multiple objects (self.prefix + key) with one S3
    multi-delete request.

    Successfully deleted names are removed from *keys* in place.  With
    ``force=True`` per-key errors are ignored; otherwise the first
    reported error is raised (NoSuchObject or an S3 error).
    """
    body = ['<Delete>']
    esc_prefix = xml_escape(self.prefix)
    for key in keys:
        body.append('<Object><Key>%s%s</Key></Object>' % (esc_prefix, xml_escape(key)))
    body.append('</Delete>')
    body = '\n'.join(body).encode('utf-8')
    headers = {'content-type': 'text/xml; charset=utf-8'}
    resp = self._do_request('POST', '/', subres='delete', body=body, headers=headers)
    try:
        root = self._parse_xml_response(resp)
        ns_p = self.xml_ns_prefix
        error_tags = root.findall(ns_p + 'Error')
        if not error_tags:
            # No errors occurred, everything has been deleted
            del keys[:]
            return
        # Some errors occurred, so we need to determine what has
        # been deleted and what hasn't
        offset = len(self.prefix)
        for tag in root.findall(ns_p + 'Deleted'):
            fullkey = tag.find(ns_p + 'Key').text
            assert fullkey.startswith(self.prefix)
            keys.remove(fullkey[offset:])
        if log.isEnabledFor(logging.DEBUG):
            for errtag in error_tags:
                log.debug('Delete %s failed with %s',
                          errtag.findtext(ns_p + 'Key')[offset:],
                          errtag.findtext(ns_p + 'Code'))
        # If *force*, just modify the passed list and return without
        # raising an exception, otherwise raise exception for the first error
        if force:
            return
        errcode = error_tags[0].findtext(ns_p + 'Code')
        errmsg = error_tags[0].findtext(ns_p + 'Message')
        errkey = error_tags[0].findtext(ns_p + 'Key')[offset:]
        if errcode == 'NoSuchKeyError':
            raise NoSuchObject(errkey)
        else:
            raise get_S3Error(errcode, 'Error deleting %s: %s' % (errkey, errmsg))
    except:
        # BUG FIX: the bare except previously swallowed *every* exception --
        # including the NoSuchObject / get_S3Error raised just above --
        # turning delete failures into silent success.  Discard the
        # (now unusable) connection, then propagate the error.
        self.conn.discard()
        raise
def _generate_loc_file(self):
    """Write the age's .loc localization XML (UTF-16 LE), covering both
    arbitrary string sets and journal entries."""
    # Only generate this junk if needed
    if not self._strings and not self._journals:
        return

    def write_line(value, *args, **kwargs):
        # tabs suck, then you die...
        # Indent with spaces, optionally .format() the value, and write
        # the line as UTF-16 little-endian bytes.
        whitespace = " " * kwargs.pop("indent", 0)
        if args or kwargs:
            value = value.format(*args, **kwargs)
        line = "".join((whitespace, value, "\n"))
        stream.write(line.encode("utf-16_le"))

    age_name = self._age_name
    # Encrypt only for the EoA engine version.
    enc = plEncryptedStream.kEncAes if self._version == pvEoa else None
    file_name = "{}.loc".format(age_name)
    with self._generate_file(file_name, enc=enc) as stream:
        # UTF-16 little endian byte order mark
        stream.write(b"\xFF\xFE")
        write_line("<?xml version=\"1.0\" encoding=\"utf-16\"?>")
        write_line("<localizations>")
        write_line("<age name=\"{}\">", age_name, indent=1)
        # Arbitrary strings defined by something like a GUI or a node tree
        for set_name, elements in self._strings.items():
            write_line("<set name=\"{}\">", set_name, indent=2)
            for element_name, translations in elements.items():
                write_line("<element name=\"{}\">", element_name, indent=3)
                for language_name, value in translations.items():
                    write_line(
                        "<translation language=\"{language}\">{translation}</translation>",
                        language=language_name,
                        translation=xml_escape(value),
                        indent=4)
                write_line("</element>", indent=3)
            write_line("</set>", indent=2)
        # Journals
        if self._journals:
            write_line("<set name=\"Journals\">", indent=2)
            for journal_name, translations in self._journals.items():
                write_line("<element name=\"{}\">", journal_name, indent=3)
                for language_name, value in translations.items():
                    write_line(
                        "<translation language=\"{language}\">{translation}</translation>",
                        language=language_name,
                        translation=xml_escape(value),
                        indent=4)
                write_line("</element>", indent=3)
            write_line("</set>", indent=2)
        # Verbose XML junk...
        # <Deledrius> You call it verbose. I call it unambiguously complete.
        write_line("</age>", indent=1)
        write_line("</localizations>")
def store_photo(user_id, credentials, photoset_id, photo, title, description, tags, on_success, on_error): """ this will call on_success with a dictionary of the new image, including 'id' and 'url' """ # in smugmug, the base64-encoded photo is what we need to upload def internal_on_success(content): "it's XML, let's just pass it for now" on_success(content) def internal_on_error(error): on_error("couldn't upload image: %s" % error) # prepare a multipart-mime message # first the Atom description (FIXME: XML inline is kinda ugly) metadata = """ <entry xmlns='http://www.w3.org/2005/Atom'> <title>%s</title> <summary>%s</summary> <category scheme="http://schemas.google.com/g/2005#kind" term="http://schemas.google.com/photos/2007#photo"/> </entry> """ % (xml_escape(title), xml_escape(description)) # treat the photo as a file photo_file = cStringIO.StringIO(base64.b64decode(photo)) boundary, body = utils.multipart_encode( vars={}, vars_with_types=[("application/atom+xml", metadata)], files=[("photo", "thefile.jpg", photo_file, "image/jpg")]) # prepend a bogus line, as per picasa spec (is this MIME?) full_body = """ Media multipart posting %s""" % body headers = { "Content-Type": "multipart/related; boundary=" + boundary, "Content-Length": str(len(full_body)), "MIME-Version": "1.0", } _signed_request( "POST", "https://picasaweb.google.com/data/feed/api/user/default/albumid/%s" % photoset_id, params=full_body, oauth_extra_params=None, credentials=credentials, on_success=internal_on_success, on_error=internal_on_error, headers=headers)
def build_response(self, correct, score, hints, student_results,
                   student_warnings=None, grader_results=None,
                   grader_warnings=None, row_limit=None, download_link=""):
    """Builds a grader response dict.

    :param correct: whether the submission is correct.
    :param score: numeric score.
    :param hints: list of hint strings shown on incorrect answers.
    :param student_results: rows for the student results table.
    :param student_warnings: warnings about the student's query.
    :param grader_results: rows for the expected-results table.
    :param grader_warnings: warnings about the grader's query.
    :param row_limit: max rows rendered per results table.
    :param download_link: markup linking to the full results download.
    :return: dict with "correct", "score" and rendered "msg" keys.
    """
    # BUG FIX: the warning parameters used mutable default arguments
    # ([]), which are shared across all calls; use None sentinels.
    if student_warnings is None:
        student_warnings = []
    if grader_warnings is None:
        grader_warnings = []

    response = {"correct": correct, "score": score}

    # Response message template context
    context = {"download_link": download_link, "hints": ""}

    # Generate student response results table
    context["student_results"] = self.html_results(student_results, row_limit)

    # Generate warning messages if queries had to be modified
    notices = ""
    if student_warnings:
        student_warnings = [xml_escape(notice) for notice in student_warnings]
        student_warning = "<strong>Warning</strong><p>"
        student_warning += "</p><p>".join(student_warnings) + "</p>"
        notices += WARNING_TMPL.substitute(msg=student_warning)
    if grader_warnings:
        grader_warnings = [xml_escape(notice) for notice in grader_warnings]
        grader_warning = "<strong>Warning</strong><p>"
        grader_warning += "</p><p>".join(grader_warnings) + "</p>"
        notices += WARNING_TMPL.substitute(msg=grader_warning)
    context["notices"] = notices

    if grader_results and not correct:
        # Generate grader response results table
        context["grader_results"] = self.html_results(grader_results, row_limit)
        # Generate hints markup if hints were provided
        if hints:
            # Ensure hint text is XML-safe
            hints = [xml_escape(hint) for hint in hints]
            hints_html = "<strong>Hints</strong>"
            hints_html += "<ul><li>"
            hints_html += "</li><li>".join(hints)
            hints_html += "</li></ul>"
            context["hints"] = hints_html
        # Incorrect response template
        response["msg"] = INCORRECT_QUERY.substitute(context)
    else:
        # Correct response template
        response["msg"] = CORRECT_QUERY.substitute(context)
    return response
def _process_report(self): while True: result = (yield) d = { k: getattr(result, k, '') for k in ('category', 'package', 'version') } d['class'] = xml_escape(result.__class__.__name__) d['msg'] = xml_escape(result.desc) self.out.write(self.scope_map[result.scope] % d)
def __init__(self, code, message=None):
    """Build an HTTP error object: derive the reason phrase and default
    message from *code*, render the error document, and set the default
    Content-Type header.
    """
    assert isinstance(code, int)  # Being a bit paranoid about the API
    self.code = code
    default_reason, default_message = http_responses[code]
    self.reason = default_reason
    self.message = default_message if message is None else message
    self.text = ERROR_DOCUMENT_TEMPLATE % dict(
        code=self.code,
        reason=xml_escape(self.reason),
        message=xml_escape(self.message),
    )
    self.headers = [("Content-Type", "text/html")]
def innerXML(node):
    """Return the escaped inner content of *node*: its text plus each
    child serialized via outterXML() followed by the child's escaped tail.
    """
    if not len(node):
        # Leaf element: just the escaped text content.
        return xml_escape(node.text or '')
    pieces = [xml_escape(node.text or '')]
    for child in node:
        pieces.append(outterXML(child))
        if child.tail:
            pieces.append(xml_escape(child.tail))
    combined = ''.join(pieces)
    return combined or ''
def _get_attrs_str(self): attr_decls = [] for attr, value in self._attrs.items(): if value is not None: attr_decls.append('%s=%s' % (xml_escape(attr), attr_escape(str(value)))) else: attr_decls.append('%s' % xml_escape(attr)) return ' '.join(attr_decls)
def _delete_multi(self, keys, force=False):
    """Delete multiple objects (self.prefix + key) via one S3
    multi-delete request.

    Successfully deleted names are removed from *keys* in place.  With
    ``force=True`` per-key errors are ignored; otherwise the first
    reported error is raised.  The response body is always drained so
    the connection can be reused.
    """
    body = [ '<Delete>' ]
    esc_prefix = xml_escape(self.prefix)
    for key in keys:
        body.append('<Object><Key>%s%s</Key></Object>' % (esc_prefix, xml_escape(key)))
    body.append('</Delete>')
    body = '\n'.join(body).encode('utf-8')
    headers = { 'content-type': 'text/xml; charset=utf-8' }
    resp = self._do_request('POST', '/', subres='delete', body=body, headers=headers)
    try:
        root = self._parse_xml_response(resp)
        ns_p = self.xml_ns_prefix
        error_tags = root.findall(ns_p + 'Error')
        if not error_tags:
            # No errors occurred, everything has been deleted
            del keys[:]
            return
        # Some errors occurred, so we need to determine what has
        # been deleted and what hasn't
        offset = len(self.prefix)
        for tag in root.findall(ns_p + 'Deleted'):
            fullkey = tag.find(ns_p + 'Key').text
            assert fullkey.startswith(self.prefix)
            keys.remove(fullkey[offset:])
        if log.isEnabledFor(logging.DEBUG):
            for errtag in error_tags:
                log.debug('Delete %s failed with %s',
                          errtag.findtext(ns_p + 'Key')[offset:],
                          errtag.findtext(ns_p + 'Code'))
        # If *force*, just modify the passed list and return without
        # raising an exception, otherwise raise exception for the first error
        if force:
            return
        errcode = error_tags[0].findtext(ns_p + 'Code')
        errmsg = error_tags[0].findtext(ns_p + 'Message')
        errkey = error_tags[0].findtext(ns_p + 'Key')[offset:]
        if errcode == 'NoSuchKeyError':
            raise NoSuchObject(errkey)
        else:
            raise get_S3Error(errcode, 'Error deleting %s: %s' % (errkey, errmsg))
    finally:
        # Need to read rest of response
        while True:
            buf = resp.read(BUFSIZE)
            if buf == b'':
                break
def build_translation_memory_file(creation_date, locale_code, entries):
    """Stream a TMX 1.4 document as text chunks.

    Yields the header first, then one <tu> element per entry, then the
    closing tags.  Rendering by hand (rather than via templates) keeps
    large exports fast.

    :arg datetime creation_date: when the TMX file is being created.
    :arg str locale_code: code of a locale.
    :arg list entries: tuples of (resource_path, key, source, target,
        project_name, project_slug).
    """
    header = (
        u'<?xml version="1.0" encoding="utf-8" ?>'
        u'\n<tmx version="1.4">'
        u'\n\t<header'
        u' adminlang="en-US"'
        u' creationtoolversion="0.1"'
        u' creationtool="pontoon"'
        u' datatype="plaintext"'
        u' segtype="sentence"'
        u' o-tmf="plain text"'
        u' srclang="en-US"'
        u' creationdate="%(creation_date)s">'
        u'\n\t</header>'
        u'\n\t<body>'
    )
    yield header % {'creation_date': creation_date.isoformat()}

    unit_template = (
        u'\n\t\t<tu tuid=%(tuid)s srclang="en-US">'
        u'\n\t\t\t<tuv xml:lang="en-US">'
        u'\n\t\t\t\t<seg>%(source)s</seg>'
        u'\n\t\t\t</tuv>'
        u'\n\t\t\t<tuv xml:lang=%(locale_code)s>'
        u'\n\t\t\t\t<seg>%(target)s</seg>'
        u'\n\t\t\t</tuv>'
        u'\n\t\t</tu>'
    )
    for resource_path, key, source, target, project_name, project_slug in entries:
        tuid = ':'.join((project_slug, slugify(resource_path), slugify(key)))
        yield unit_template % {
            'tuid': quoteattr(tuid),
            'source': xml_escape(source),
            'locale_code': quoteattr(locale_code),
            'target': xml_escape(target),
            'project_name': xml_escape(project_name),
        }

    yield (
        u'\n\t</body>'
        u'\n</tmx>'
    )
def build_translation_memory_file(creation_date, locale_code, entries):
    """
    TMX files will contain large amount of entries and it's impossible to
    render all the data with django templates.
    Rendering of string in memory is a lot faster.
    :arg datetime creation_date: when TMX file is being created.
    :arg str locale_code: code of a locale
    :arg list entries: A list which contains tuples with following items:
                       * resource_path - path of a resource,
                       * key - key of an entity,
                       * source - source string of entity,
                       * target - translated string,
                       * project_name - name of a project,
                       * project_slug - slugified name of a project,
    """
    # Header chunk: fixed TMX 1.4 preamble with the creation timestamp.
    yield (
        u'<?xml version="1.0" encoding="utf-8" ?>'
        u'\n<tmx version="1.4">'
        u'\n\t<header'
        u' adminlang="en-US"'
        u' creationtoolversion="0.1"'
        u' creationtool="pontoon"'
        u' datatype="plaintext"'
        u' segtype="sentence"'
        u' o-tmf="plain text"'
        u' srclang="en-US"'
        u' creationdate="%(creation_date)s">'
        u'\n\t</header>'
        u'\n\t<body>' % {
            'creation_date': creation_date.isoformat()
        }
    )
    for resource_path, key, source, target, project_name, project_slug in entries:
        # NOTE(review): resource_path is embedded unslugified here;
        # quoteattr() still makes the tuid attribute-safe.
        tuid = ':'.join((project_slug, resource_path, slugify(key)))
        # NOTE(review): 'project_name' is escaped and passed but the
        # template has no %(project_name)s placeholder -- harmless extra.
        yield (
            u'\n\t\t<tu tuid=%(tuid)s srclang="en-US">'
            u'\n\t\t\t<tuv xml:lang="en-US">'
            u'\n\t\t\t\t<seg>%(source)s</seg>'
            u'\n\t\t\t</tuv>'
            u'\n\t\t\t<tuv xml:lang=%(locale_code)s>'
            u'\n\t\t\t\t<seg>%(target)s</seg>'
            u'\n\t\t\t</tuv>'
            u'\n\t\t</tu>' % {
                'tuid': quoteattr(tuid),
                'source': xml_escape(source),
                'locale_code': quoteattr(locale_code),
                'target': xml_escape(target),
                'project_name': xml_escape(project_name),
            }
        )
    yield (
        u'\n\t</body>'
        u'\n</tmx>'
    )
def __init__(self, code, message=None): assert isinstance(code, int) # Being a bit paranoid about the API self.code = code self.reason, self.message = http_responses[code] if message is not None: self.message = message self.text = ERROR_DOCUMENT_TEMPLATE % dict( code=self.code, reason=xml_escape(self.reason), message=xml_escape(self.message)) self.headers = [("Content-Type", "text/html")]
def _on_torrent_status(self, torrent, d): for key in self.XSS_VULN_KEYS: try: if key == 'peers': for peer in torrent[key]: peer['client'] = xml_escape(peer['client']) else: torrent[key] = xml_escape(torrent[key]) except KeyError: pass d.callback(torrent)
def cfg2dot(fin, fout):
    """Convert JSON control-flow graphs read from *fin* into Graphviz DOT
    written to *fout* (Python 2 module: uses iteritems()).
    """
    import json
    src = json.load(fin)['cfgs']
    fout.write('digraph G {\n')
    fout.write(' node [shape=record];\n')
    fnidx = 0
    for fnname, fn in src.iteritems():
        # Decide whether the special exit-code-2 node is unreachable via
        # non-type-2 edges; if so it is skipped in the output below.
        skipexit2 = True
        for node in fn['nodes']:
            for succ in node[1]:
                if succ[1] != 2 and fn['nodes'][succ[0]][0] == 'exit' and fn['nodes'][succ[0]][2][0][1] == '2':
                    skipexit2 = False
                    break
            if not skipexit2:
                break
        # One entry node per function, linked to the CFG entry.
        fout.write(' entry%d [label="%s"];\n' % (fnidx, xml_escape(fnname)))
        fout.write(' entry%d -> node%d_%d;\n' % (fnidx, fnidx, fn['entry']))
        for i, node in enumerate(fn['nodes']):
            if skipexit2 and node[0] == 'exit' and node[2][0][1] == '2':
                continue
            # Build the operation label: calls render as "name(args)",
            # values as bare args, everything else as "op[args]".
            if node[0] == 'call':
                ops = node[2][1:]
                opstr = node[2][0][1] + '('
            elif node[0] == 'value':
                ops = node[2]
                opstr = ''
            else:
                ops = node[2]
                opstr = node[0] + '['
            args = []
            for op in ops:
                # Operand prefixes: '&' = variable pointer, '$' = node ref.
                if op[0] == 'varptr':
                    args.append('&' + str(op[1]))
                elif op[0] != 'node':
                    args.append(str(op[1]))
                else:
                    args.append('$' + str(op[1]))
            opstr = opstr + ', '.join(args)
            if node[0] == 'call':
                opstr = opstr + ')'
            elif node[0] != 'value':
                opstr = opstr + ']'
            fout.write(' node%d_%d [label="<f1> %d |<f2> %s"];\n' % (fnidx, i, i, xml_escape(opstr)));
            # Edge type 0 = normal, 1 = dotted (conditional/alternate).
            for succ in node[1]:
                if succ[1] == 0:
                    fout.write(' node%d_%d -> node%d_%d [label="%s"];\n' % (fnidx, i, fnidx, succ[0], xml_escape(convert_sir_const(succ[2]))))
                if succ[1] == 1:
                    fout.write(' node%d_%d -> node%d_%d [label="%s",style="dotted"];\n' % (fnidx, i, fnidx, succ[0], xml_escape(convert_sir_const(succ[2]))))
        fnidx += 1
    fout.write('}')
def scrape(comic_id):
    """Build an Atom feed (returned as one string) for the given comic by
    scraping its homepage and per-strip image URLs, with retries."""
    title, strips = get_homepage_data(comic_id)
    xmlContent=[]
    xmlContent.append('<?xml version="1.0" encoding="utf-8"?>')
    xmlContent.append('<feed xmlns="http://www.w3.org/2005/Atom">')
    xmlContent.append('<title>%s</title>' % xml_escape(title))
    strip_count = 0
    for strip_date, strip_url in strips:
        counter=0
        #print time.localtime()
        # Retry fetching the strip image URL up to MAX_RETRIES times.
        # NOTE(review): if every retry fails, strip_image_url keeps its
        # value from the previous iteration (or is undefined on the first)
        # -- confirm this is acceptable upstream.
        while True:
            try:
                strip_image_url = get_strip_image_url(strip_url)
                break
            except :
                print ("Retrying %s in %d" % (strip_url, SLEEP_BETWEEN_RETRIES), file=sys.stderr)
                counter=counter+1
                if counter >=MAX_RETRIES:
                    break
                time.sleep(SLEEP_BETWEEN_RETRIES)
        if not strip_image_url:
            continue
        strip_count += 1
        xmlContent.append('<entry>')
        xmlContent.append('  <title>%s</title>' % xml_escape(strip_date.strftime('%A, %B %d, %Y')))
        xmlContent.append('  <id>%s</id>' % strip_url)
        xmlContent.append('  <published>%sT12:00:00.000Z</published>' % strip_date.isoformat())
        xmlContent.append('  <link rel="alternate" href="%s" type="text/html"/>' % xml_escape(strip_url))
        xmlContent.append('  <content type="xhtml">')
        xmlContent.append('    <div xmlns="%s"><img src="%s"/></div>' % (XHTML_NS, xml_escape(strip_image_url)))
        xmlContent.append('  </content>')
        xmlContent.append('</entry>')
        # Be polite to the server between comics.
        time.sleep(SLEEP_BETWEEN_COMICS)
    # (commented-out fallback entry retained from upstream)
    #if not strip_count:
        #xmlContent.append '<entry>'
        #xmlContent.append '  <title>Could not scrape feed</title>'
        #xmlContent.append '  <id>tag:persistent.info,2013:gocomics-scrape-%d</id>' % int(time.time())
        #xmlContent.append '  <link rel="alternate" href="https://github.com/mihaip/feed-scraping" type="text/html"/>'
        #xmlContent.append '  <content type="html">'
        #xmlContent.append '    Could not scrape the feed. Check the GitHub repository for updates.'
        #xmlContent.append '  </content>'
        #xmlContent.append '</entry>'
    xmlContent.append('</feed>')
    return "\n".join(xmlContent)
def markdown_first_paragraph(text):
    """Render *text* as Markdown and return only the first paragraph's
    content as an XML fragment; on any error return *text* unchanged.
    """
    try:
        rendered = markdown(text, safe_mode='escape')
        # Extract the contents of the first <p>
        root = lxml.etree.fromstring('<div>' + rendered + '</div>')
        first_para = root.find('p')
        # Re-assemble: leading text, serialized child elements (which
        # carry their own tails), then the paragraph's tail text.
        html_content = ''.join([xml_escape(first_para.text or '')] + [
            lxml.etree.tostring(child) for child in first_para.iterchildren()
        ] + [xml_escape(first_para.tail or '')])
        return XML(html_content)
    except Exception:
        # Best-effort: fall back to the raw input on render/parse failure.
        return text
def markdown_first_paragraph(text):
    """Render *text* as Markdown and return just the first paragraph's
    content as an XML fragment, falling back to *text* on any error.
    """
    try:
        rendered = markdown(text, safe_mode='escape')
        # Extract the contents of the first <p>
        root = lxml.etree.fromstring('<div>' + rendered + '</div>')
        first_para = root.find('p')
        # Leading text + serialized children (tails included) + tail text.
        html_content = ''.join(
            [xml_escape(first_para.text or '')] +
            [lxml.etree.tostring(child) for child in first_para.iterchildren()] +
            [xml_escape(first_para.tail or '')])
        return XML(html_content)
    except Exception:
        # Best-effort fallback on render/parse failure.
        return text
def upsert_xml(self): return u""" <accessControl><listing>%s</listing>%s</accessControl>%s <schedule><startDate>%s</startDate><duration>%s</duration><timeZoneID>%s</timeZoneID></schedule> <metaData><sessionName>%s</sessionName><description>%s</description></metaData> """ % ( self.visibility.upper(), self.account.meetings_require_password and '<sessionPassword>0000</sessionPassword>' or '', self.session_key and ('<sessionKey>%s</sessionKey>' % self.session_key) or '', self.starts_at.strftime("%m/%d/%Y %H:%M:%S"), (self.ends_at - self.starts_at).m, timezone.get_id(self.starts_at.tz.zone), xml_escape( self.title), xml_escape(self.description))
def upsert_xml(self): return u""" <accessControl><listing>%s</listing>%s</accessControl>%s <schedule><startDate>%s</startDate><duration>%s</duration><timeZoneID>%s</timeZoneID></schedule> <metaData><sessionName>%s</sessionName><description>%s</description></metaData> """ % ( self.visibility.upper(), self.account.meetings_require_password and '<sessionPassword>0000</sessionPassword>' or '', self.session_key and ('<sessionKey>%s</sessionKey>'%self.session_key) or '', self.starts_at.strftime("%m/%d/%Y %H:%M:%S"), (self.ends_at-self.starts_at).m, timezone.get_id(self.starts_at.tz.zone), xml_escape(self.title), xml_escape(self.description))
def format_lexc_xml(wordmap):
    """Format one wordmap entry as a lexc XML <e> element line.

    '|' and '_' in lemma/stub are rewritten to word-boundary and
    morpheme-boundary <s/> markers; POS and the proper/suffix/prefix
    flags add further markers to the analysis side.
    """
    word_boundary = '<s mcs="wb"/>'
    morph_boundary = '<s mcs="mb"/>'
    analysis = xml_escape(wordmap["lemma"]).replace(
        "|", word_boundary).replace("_", morph_boundary)
    analysis += '<s mcs="' + wordmap["pos"] + '"/>'
    if wordmap["is_proper"]:
        analysis += '<s mcs="proper"/>'
    if wordmap["is_suffix"]:
        # Suffix marker goes in front of the analysis.
        analysis = "<s mcs='suffix'/>" + analysis
    if wordmap["is_prefix"]:
        analysis += "<s mcs='prefix'/>"
    stub = xml_escape(wordmap["stub"]).replace(
        "|", word_boundary).replace("_", morph_boundary)
    return ' <e><a>%s</a><i>%s</i><cont lexica="%s"/></e>' % (analysis, stub, wordmap["new_para"])
def do_export(self, file):
    """Serialize every live entry (latest revision not deleted) to *file*
    as <galkwi-exported version="2.0"> XML, ordered by word.

    Each <Entry> carries the word's fields plus its complete revision
    history, newest first, following parent links.
    """
    write = file.write
    queryset = Entry.objects.filter(latest__deleted=False).order_by('latest__word__word')
    write('<galkwi-exported version="2.0">\n')
    for entry in queryset:
        rev = entry.latest
        word = rev.word
        write('<Entry>\n')
        write(' <word>%s</word>\n' % xml_escape(word.word))
        write(' <pos>%s</pos>\n' % xml_escape(word.pos))
        # Optional word attributes are emitted only when present.
        for tag, value in (('props', word.props), ('stem', word.stem),
                           ('etym', word.etym), ('description', word.description)):
            if value:
                write(' <%s>%s</%s>\n' % (tag, xml_escape(value), tag))
        write(' <revisions>\n')
        while rev:
            write(' <revision>\n')
            write(' <name>%s</name>\n' % xml_escape(rev.user.username))
            # Timestamps are normalized to UTC, ISO-8601 with Z suffix.
            stamp = rev.timestamp.astimezone(timezone.utc)
            write(' <datetime>%s</datetime>\n' % stamp.strftime('%Y-%m-%dT%H:%M:%SZ'))
            if rev.comment:
                write(' <comment>%s</comment>\n' % xml_escape(rev.comment))
            write(' </revision>\n')
            rev = rev.parent
        write(' </revisions>\n')
        write('</Entry>\n')
    write('</galkwi-exported>\n')
def cmd_export(self):
    """USAGE: canto-remote export
    This will print an OPML file to standard output."""
    print("""<opml version="1.0">""")
    print("""\t<body>""")
    for f in self._get_feeds():
        # BUG FIX: quotes inside an XML attribute must be entity-escaped
        # as &quot; -- backslash-escaping ("\"") has no meaning in XML and
        # produced malformed OPML.  The url attribute also needs it.
        name = xml_escape(f["name"]).replace('"', "&quot;")
        url = xml_escape(f["url"]).replace('"', "&quot;")
        print("""\t\t<outline text="%s" xmlUrl="%s" type="rss" />""" % (name, url))
    print("""\t</body>""")
    print("""</opml>""")
def convert_aspectFilters(webParams, APIParams):
    """Move eBay aspect-filter keys (capitalised names) from *webParams*
    into APIParams['aspectFilter'].

    Each moved value is a one-element list holding a '|'-separated,
    URL-quoted list of aspect values; '!' denotes "Not Specified".
    Mutates both dictionaries in place.
    """
    # BUG FIX: iterate over a snapshot -- the loop deletes keys from
    # webParams, which is unsafe while iterating the live items() view
    # (RuntimeError on Python 3).
    for k, v in list(webParams.items()):
        # Guard against empty keys; k[0] would raise IndexError.
        if k and k[0].isupper():
            assert (len(v) == 1)
            v = v[0]
            del webParams[k]
            aspectFilters = APIParams.get('aspectFilter', [])
            aspects = urlparse.unquote(v).split('|')  # todo unicode ?
            aspects = ['Not Specified' if x == '!' else xml_escape(x)
                       for x in aspects]
            k = drop_bell(xml_escape(k))
            aspectFilters.append({'aspectName': k, 'aspectValueName': aspects})
            APIParams['aspectFilter'] = aspectFilters
def _generate_loc_file(self):
    """Emit this age's localization (.loc) file as UTF-16 LE XML.

    Skipped entirely when there are no strings and no journals.
    Output is AES-encrypted for EoA-version targets, plain otherwise.
    """
    # Only generate this junk if needed
    if not self._strings and not self._journals:
        return

    def write_line(value, *args, **kwargs):
        """Write one line, .format()-ing when args/kwargs are given;
        `indent` (popped from kwargs) selects the leading whitespace."""
        # tabs suck, then you die...
        whitespace = " " * kwargs.pop("indent", 0)
        if args or kwargs:
            value = value.format(*args, **kwargs)
        line = "".join((whitespace, value, "\n"))
        # Closure over `stream`, bound inside the `with` block below.
        stream.write(line.encode("utf-16_le"))

    age_name = self._age_name
    # EoA expects the .loc file encrypted; other versions take it plain.
    enc = plEncryptedStream.kEncAes if self._version == pvEoa else None
    file_name = "{}.loc".format(age_name)
    with self._generate_file(file_name, enc=enc) as stream:
        # UTF-16 little endian byte order mark
        stream.write(b"\xFF\xFE")
        write_line("<?xml version=\"1.0\" encoding=\"utf-16\"?>")
        write_line("<localizations>")
        write_line("<age name=\"{}\">", age_name, indent=1)

        # Arbitrary strings defined by something like a GUI or a node tree
        for set_name, elements in self._strings.items():
            write_line("<set name=\"{}\">", set_name, indent=2)
            for element_name, translations in elements.items():
                write_line("<element name=\"{}\">", element_name, indent=3)
                for language_name, value in translations.items():
                    # Translation text is XML-escaped; names are trusted raw.
                    write_line("<translation language=\"{language}\">{translation}</translation>",
                               language=language_name, translation=xml_escape(value), indent=4)
                write_line("</element>", indent=3)
            write_line("</set>", indent=2)

        # Journals
        if self._journals:
            write_line("<set name=\"Journals\">", indent=2)
            for journal_name, translations in self._journals.items():
                write_line("<element name=\"{}\">", journal_name, indent=3)
                for language_name, value in translations.items():
                    write_line("<translation language=\"{language}\">{translation}</translation>",
                               language=language_name, translation=xml_escape(value), indent=4)
                write_line("</element>", indent=3)
            write_line("</set>", indent=2)

        # Verbose XML junk...
        # <Deledrius> You call it verbose. I call it unambiguously complete.
        write_line("</age>", indent=1)
        write_line("</localizations>")
def _list_domain_settings( self, object ):
    '''Add a list with the domain settings and attached drives to the
    summary page (page 2) of the instance wizard.'''
    rows = []
    settings = umcd.List()
    # Basic options: show the value when present, otherwise just the label.
    for text, key in ( ( _( 'Name' ), 'name' ), ( _( 'Description' ), 'description' ), ( _( 'CPUs' ), 'cpus' ), ( _( 'Memory' ), 'memory' ) ):
        if object.options.get( key ):
            settings.add_row( [ umcd.HTML( '<i>%s</i>' % text ), object.options.get( key, '' ) ] )
        else:
            settings.add_row( [ umcd.HTML( '<i>%s</i>' % text ) ] )
    # VNC flag is rendered as activated/deactivated text.
    if object.options.get( 'vnc' ):
        value = _( 'activated' )
    else:
        value = _( 'deactivated' )
    settings.add_row( [ umcd.HTML( '<i>%s</i>' % _( 'Direct access' ) ), value ] )
    rows.append( [ settings ] )
    rows.append( [ umcd.HTML( '<b>%s</b><br>' % _( 'Attached drives' ) ), ] )
    html = '<ul class="umc_listing">'
    for dev in self.drives:
        values = {}
        # Map the protocol device constant to a translated label.
        if dev.device == uvmmp.Disk.DEVICE_DISK:
            values[ 'type' ] = _( 'hard drive' )
        elif dev.device == uvmmp.Disk.DEVICE_CDROM:
            values[ 'type' ] = _( 'CDROM drive' )
        elif dev.device == uvmmp.Disk.DEVICE_FLOPPY:
            values[ 'type' ] = _( 'floppy drive' )
        else:
            values[ 'type' ] = _( 'unknown' )
        if dev.source:
            # Try to resolve the image's directory to a known storage pool.
            dir = os.path.dirname(dev.source)
            for pool in self.drive_wizard.storage_pools.values():
                if pool.path == dir:
                    values['size'] = MemorySize.num2str(dev.size)
                    # Truncate long image names to 40 chars for display.
                    if len(os.path.basename(dev.source)) > 40:
                        values['image'] = xml_escape("%s..." % os.path.basename(dev.source)[0:40])
                    else:
                        values['image'] = xml_escape(os.path.basename(dev.source))
                    values['pool'] = xml_escape(pool.name)
                    html += _('<li>%(type)s: %(size)s (image file %(image)s in pool %(pool)s)</li>') % values
                    break
            else:
                # No pool matched: treat the source as a local device path.
                values['device'] = dev.source
                html += _('<li>%(type)s: local device %(device)s</li>') % values
        else:
            html += _('<li>%(type)s: empty device</li>') % values
    html += '</ul>'
    rows.append( [ umcd.HTML( html ) ] )
    # Replace the summary page's first option with the assembled listing.
    self[InstanceWizard.PAGE_SUMMARY].options[0] = umcd.List( content = rows )
def format_lexc_xml(wordmap):
    """Render a single wordmap as a lexc XML entry line.

    '|' marks word boundaries and '_' morpheme boundaries; both are
    replaced with the corresponding <s mcs=.../> separator tags after
    XML-escaping the raw text.
    """
    def tag_boundaries(text):
        # Escape first, then substitute the boundary markers.
        escaped = xml_escape(text)
        escaped = escaped.replace('|', '<s mcs="wb"/>')
        return escaped.replace('_', '<s mcs="mb"/>')

    analysis = tag_boundaries(wordmap['lemma']) + '<s mcs="' + wordmap['pos'] + '"/>'
    if wordmap['is_proper']:
        analysis += '<s mcs="proper"/>'
    if wordmap['is_suffix']:
        analysis = "<s mcs='suffix'/>" + analysis
    if wordmap['is_prefix']:
        analysis += "<s mcs='prefix'/>"

    return (' <e><a>%s</a><i>%s</i><cont lexica="%s"/></e>'
            % (analysis, tag_boundaries(wordmap['stub']), wordmap['new_para']))
def get(license_class, locale='en', **kwargs):
    """Issue a license of *license_class*, answering with **kwargs.

    Keys matching the license class's declared fields (plus 'version')
    become answer elements; everything else is collected as work-info.
    All user-supplied values are XML-escaped before being embedded in
    the answers document.  Delegates to issue().
    """
    if 'jurisdiction' not in kwargs or not kwargs['jurisdiction']:
        kwargs['jurisdiction'] = '-'

    # generate the answers XML
    answers = "<answers><locale>%s</locale><license-%s>" % (
        locale, license_class.name)

    # get the list of valid fields for this license class
    fields = license_class.kwargs()

    work_info = {}
    for k in kwargs:
        if k in fields:
            # BUG FIX: field answers were previously interpolated raw,
            # allowing markup injection; escape them like work-info.
            answers = answers + "<%s>%s</%s>" % (k, xml_escape(kwargs[k].strip()), k)
        elif k == 'version':
            answers = answers + "<%s>%s</%s>" % (k, xml_escape(str(kwargs[k])), k)
        else:
            work_info[k] = xml_escape(kwargs[k])

    str_work_info = "".join(
        ["<%s>%s</%s>" % (k, work_info[k], k) for k in work_info])

    answers = answers + "</license-%s><work-info>%s</work-info></answers>" % (
        license_class.name, str_work_info)

    # delegate to the normal issue method
    return issue(answers)
def _convert_class_to_xml(source, xml_prefix=True): if source is None: return '' xmlstr = '' if xml_prefix: xmlstr = '<?xml version="1.0" encoding="utf-8"?>' if isinstance(source, list): for value in source: xmlstr += _convert_class_to_xml(value, False) elif isinstance(source, WindowsAzureData): class_name = source.__class__.__name__ xmlstr += '<' + class_name + '>' for name, value in vars(source).items(): if value is not None: if isinstance(value, list) or \ isinstance(value, WindowsAzureData): xmlstr += _convert_class_to_xml(value, False) else: xmlstr += ('<' + _get_serialization_name(name) + '>' + xml_escape(str(value)) + '</' + _get_serialization_name(name) + '>') xmlstr += '</' + class_name + '>' return xmlstr
def Browse(self, BrowseFlag, StartingIndex, RequestedCount, ObjectID, Filter=None, SortCriteria=None):
    """ContentDirectory Browse action.

    For 'BrowseDirectChildren', returns the requested slice of the
    directory's children rendered as escaped DIDL-Lite XML; any other
    flag is handled as a metadata lookup on the object's parent
    (TODO: check other flags).  Returns a dict with Result,
    NumberReturned and TotalMatches.
    """
    RequestedCount = int(RequestedCount)
    path = self.object_id_to_path(ObjectID)
    if BrowseFlag == 'BrowseDirectChildren':
        children = list(self.list_dlna_dir(path))
        start = int(StartingIndex)
        # RequestedCount of 0 means no upper bound (slice to the end).
        stop = (start + RequestedCount) if RequestedCount else None
        # Render each child's XML concurrently on the thread pool.
        result_elements = list(thread_pool.map(
            self.object_xml,
            itertools.repeat(ObjectID),
            children[start:stop]))
        # TotalMatches reflects all children, not just the returned slice.
        total_matches = len(children)
    else:
        # TODO check other flags
        parent_id = path_to_object_id(os.path.normpath(os.path.split(path)[0]))
        result_elements = [self.object_xml(parent_id, path, '??ROOT??', None)]
        total_matches = 1
    # Guard so pformat only runs when debug logging is actually enabled.
    if logging.root.isEnabledFor(logging.DEBUG):
        logging.debug(
            'ContentDirectory::Browse result:\n%s',
            pprint.pformat(result_elements))
    # The DIDL document is itself escaped for embedding in the SOAP reply.
    return dict(
        Result=xml_escape(didl_lite(''.join(result_elements))),
        NumberReturned=len(result_elements),
        TotalMatches=total_matches)
def _cleanOdt(text): def compress(spaces): return _spaces(len(spaces.group(0))) s = xml_escape(text) s = re.sub(u" {2,}", compress, s) return s
def _EscapeResourceString(raw_resource): if type(raw_resource) == int: return raw_resource return xml_escape(raw_resource)\ .replace('\\', '\\\\')\ .replace('\"','\\\"')\ .replace('\'','\\\'')
def add_from_magnets(self, uris):
    """Add a list of magnet uris to torrent_liststore.

    Invalid magnets are logged and skipped; duplicates are counted and
    reported in a single dialog at the end.  Magnets without a files
    tree trigger a background metadata prefetch via the core.
    """
    already_added = 0
    for uri in uris:
        magnet = deluge.common.get_magnet_info(uri)
        if not magnet:
            log.error('Invalid magnet: %s', uri)
            continue
        torrent_id = magnet['info_hash']
        files = magnet['files_tree']
        # The uri is escaped because the liststore renders it as markup.
        if not self._add_torrent_liststore(torrent_id, magnet['name'], xml_escape(uri), files, None):
            # Liststore refused the entry: it is already present.
            already_added += 1
            continue
        # File list already known; no metadata prefetch needed.
        if files:
            continue
        # Fetch metadata in the background and track the pending magnet.
        d = client.core.prefetch_magnet_metadata(uri)
        d.addCallback(self._on_uri_metadata, uri)
        self.prefetching_magnets.append(magnet['info_hash'])
        self.prefetch_waiting_message(torrent_id, None)
    if already_added:
        self.show_already_added_dialog(already_added)
def GET(self, req):
    """ Handle GET Service request """
    req.query_string = 'format=json'
    resp = req.get_response(self.app)
    status = resp.status_int

    if status != HTTP_OK:
        # Map auth failures to S3 AccessDenied; everything else InvalidURI.
        if status in (HTTP_UNAUTHORIZED, HTTP_FORBIDDEN):
            return get_err_response('AccessDenied')
        return get_err_response('InvalidURI')

    containers = loads(resp.body)
    # We don't keep the creation time of a bucket (s3cmd doesn't work
    # without one), so a bogus fixed timestamp is reported.
    buckets = "".join(
        '<Bucket><Name>%s</Name><CreationDate>'
        '2009-02-03T16:45:09.000Z</CreationDate></Bucket>'
        % xml_escape(container['name'])
        for container in containers)
    body = ('<?xml version="1.0" encoding="UTF-8"?>'
            '<ListAllMyBucketsResult '
            'xmlns="http://doc.s3.amazonaws.com/2006-03-01">'
            '<Buckets>%s</Buckets>'
            '</ListAllMyBucketsResult>' % buckets)
    return HTTPOk(content_type='application/xml', body=body)
def reload_global(self, client, connection_id, entry, data):
    """Unbind all global hotkeys and rebind the show_hide method.

    If more global hotkeys should be added, just connect the gconf key
    to the watch system and add.
    """
    gkey = entry.get_key()
    key = entry.get_value().get_string()
    if key == 'disabled':
        # 'disabled' is the sentinel for "no hotkey": nothing to bind.
        return

    try:
        # Drop the previous binding recorded for this gconf key, if any.
        self.guake.hotkeys.unbind(self.globalhotkeys[gkey])
    except KeyError:
        pass
    self.globalhotkeys[gkey] = key
    if not self.guake.hotkeys.bind(key, self.guake.show_hide):
        # Binding failed: notify the user which accelerator was rejected,
        # escaping its label since the message body is Pango markup.
        keyval, mask = gtk.accelerator_parse(key)
        label = gtk.accelerator_get_label(keyval, mask)
        filename = pixmapfile('guake-notification.png')
        guake.notifier.show_message(
            _('Guake Terminal'),
            _('A problem happened when binding <b>%s</b> key.\n'
              'Please use Guake Preferences dialog to choose another '
              'key') % xml_escape(label),
            filename)
def do_list(self, q):
    """Serve an S3-style ListBucket response.

    Lists keys from self.server.data that match the optional 'prefix'
    and sort after the optional 'marker', capped at 'max_keys'
    (default 1000); IsTruncated flips to true when the cap is hit.
    """
    params = q.params
    marker = params.get('marker', [None])[0]
    max_keys = int(params.get('max_keys', [1000])[0])
    prefix = params.get('prefix', [''])[0]

    truncated_index = 3  # position of the IsTruncated line below
    lines = [
        '<?xml version="1.0" encoding="UTF-8"?>',
        '<ListBucketResult xmlns="%s">' % self.xml_ns,
        '<MaxKeys>%d</MaxKeys>' % max_keys,
        '<IsTruncated>false</IsTruncated>',
    ]

    matched = 0
    for key in sorted(self.server.data):
        if not key.startswith(prefix):
            continue
        if marker and key <= marker:
            continue
        lines.append('<Contents><Key>%s</Key></Contents>' % xml_escape(key))
        matched += 1
        if matched == max_keys:
            lines[truncated_index] = '<IsTruncated>true</IsTruncated>'
            break
    lines.append('</ListBucketResult>')

    body = '\n'.join(lines).encode()
    self.send_response(200)
    self.send_header("Content-Type", 'text/xml')
    self.send_header("Content-Length", str(len(body)))
    self.end_headers()
    self.wfile.write(body)
def _convert_class_to_xml(source, xml_prefix=True): if source is None: return "" xmlstr = "" if xml_prefix: xmlstr = '<?xml version="1.0" encoding="utf-8"?>' if isinstance(source, list): for value in source: xmlstr += _convert_class_to_xml(value, False) elif isinstance(source, WindowsAzureData): class_name = source.__class__.__name__ xmlstr += "<" + class_name + ">" for name, value in vars(source).iteritems(): if value is not None: if isinstance(value, list) or isinstance(value, WindowsAzureData): xmlstr += _convert_class_to_xml(value, False) else: xmlstr += ( "<" + _get_serialization_name(name) + ">" + xml_escape(str(value)) + "</" + _get_serialization_name(name) + ">" ) xmlstr += "</" + class_name + ">" return xmlstr
def GET(self, env, start_response):
    """ Handle GET Service request """
    # Ask the proxy for the account's container listing in JSON.
    env['QUERY_STRING'] = 'format=json'
    body_iter = self.app(env, self.do_start_response)
    # do_start_response stashed (status, headers) in self.response_args.
    status = int(self.response_args[0].split()[0])
    headers = dict(self.response_args[1])  # NOTE(review): unused -- verify
    if status != 200:
        if status == 401:
            return get_err_response('AccessDenied')
        else:
            return get_err_response('InvalidURI')
    containers = loads(''.join(list(body_iter)))
    # we don't keep the creation time of a backet (s3cmd doesn't
    # work without that) so we use something bogus.
    body = '<?xml version="1.0" encoding="UTF-8"?>' \
           '<ListAllMyBucketsResult ' \
           'xmlns="http://doc.s3.amazonaws.com/2006-03-01">' \
           '<Buckets>%s</Buckets>' \
           '</ListAllMyBucketsResult>' \
           % ("".join(['<Bucket><Name>%s</Name><CreationDate>'
                       '2009-02-03T16:45:09.000Z</CreationDate></Bucket>' %
                       xml_escape(i['name']) for i in containers]))
    resp = Response(status=200, content_type='application/xml', body=body)
    return resp