def get_http_response_string(gateway_session_id, ivr_responses, collect_input=False, hang_up=True, input_length=None):
    """Build the IVR gateway XML response body.

    The first 7 characters of ``gateway_session_id`` are stripped to
    recover the gateway's own session id for the ``sid`` attribute.
    Each entry in ``ivr_responses`` plays either an audio file or
    spoken text; optionally the whole prompt is wrapped in a DTMF
    collection element, and a hangup instruction is appended.
    """
    parts = []
    for entry in ivr_responses:
        audio_url = entry["audio_file_url"]
        spoken_text = entry["text_to_say"]
        if audio_url is not None:
            parts.append("<playaudio>%s</playaudio>" % escape(audio_url))
        elif spoken_text is not None:
            parts.append("<playtext>%s</playtext>" % escape(spoken_text))
    xml_string = "".join(parts)

    length_attr = "" if input_length is None else 'l="%s"' % input_length
    # Single-digit input gets a shorter DTMF timeout.
    timeout = "3000" if input_length == 1 else "5000"

    if collect_input:
        xml_string = '<collectdtmf %s o="%s">%s</collectdtmf>' % (length_attr, timeout, xml_string)
    if hang_up:
        xml_string += "<hangup/>"
    return '<response sid="%s">%s</response>' % (gateway_session_id[7:], xml_string)
def render_report_group(group, request, css_class=''):
    """Produce the HTML for a report group on a section index page"""
    # Only section columns and report groups are renderable; anything
    # else produces no markup at all.
    if not (IPeopleSectionColumn.providedBy(group)
            or IPeopleReportGroup.providedBy(group)):
        return ''
    result = []
    title = getattr(group, 'title', '')
    if title:
        result.append('<h3>%s</h3>' % escape(group.title))
    if css_class:
        # quoteattr supplies the surrounding quotes for the attribute value.
        result.append('<ul class=%s>' % quoteattr(css_class))
    else:
        result.append('<ul>')
    for obj in group.values():
        if IPeopleReport.providedBy(obj):
            url = resource_url(obj, request)
            result.append('<li><a href=%s class=%s>%s</a></li>' % (
                quoteattr(url), quoteattr(obj.css_class), escape(obj.link_title)))
        elif IPeopleReportGroup.providedBy(obj):
            # NOTE(review): the recursive call does not forward css_class,
            # so nested groups always render with the default — confirm
            # this is intended.
            html = render_report_group(obj, request)
            result.append('<li>')
            result.append(html)
            result.append('</li>')
    result.append('</ul>')
    return '\n'.join(result)
def give(self, new_owner=None): if not new_owner: try: new_owner = self._ask_question('Give ticket to…', 'Enter the username of the new owner.', 'Username:'******'Owner': new_owner, 'id': self._ticket}) if response.result == RTResult.CANT_REASSIGN: self._notify("Couldn't change owner", "This ticket belongs to %s, not you.%s" % ( saxutils.escape(response.owner), ' Try stealing it first.' if 'actions' not in self._notify.caps else ''), gtk.STOCK_DIALOG_WARNING, [self.show, self.steal_and_give(new_owner=new_owner, partial=True)]) elif response.result == RTResult.OWNER_CHANGED: self._notify('Owner changed', 'This ticket now belongs to %s.' % saxutils.escape(response.owner), gtk.STOCK_DIALOG_INFO, [self.show]) elif response.result == RTResult.NO_SUCH_USER: self._notify('No such user', "The ticket's owner wasn't changed as the specified user doesn't exist.", gtk.STOCK_DIALOG_WARNING, [self.show, self.give_again])
def output_difference(difference, print_func, parents):
    """Print the HTML rendering of `difference` and recurse into its details.

    `parents` is the chain of source names above this difference; it is
    used to build a stable anchor for deep-linking.  The closing </div>
    is emitted in a finally block so the markup stays balanced even when
    PrintLimitReached aborts the traversal.
    """
    logger.debug('html output for %s', difference.source1)
    sources = parents + [difference.source1]
    print_func(u"<div class='difference'>")
    try:
        print_func(u"<div class='diffheader'>")
        if difference.source1 == difference.source2:
            # BUG FIX: was "%s<span>" — the span was opened twice and never
            # closed, producing invalid HTML.
            print_func(u"<div><span class='source'>%s</span>" % escape(difference.source1))
        else:
            print_func(u"<div><span class='source'>%s</span> vs.</div>" % escape(difference.source1))
            print_func(u"<div><span class='source'>%s</span>" % escape(difference.source2))
        # Anchor derived from the source chain (root element excluded).
        anchor = '/'.join(sources[1:])
        print_func(u" <a class='anchor' href='#%s' name='%s'>¶</a>" % (anchor, anchor))
        print_func(u"</div>")
        if difference.comment:
            print_func(u"<div class='comment'>%s</div>" % escape(difference.comment).replace('\n', '<br />'))
        print_func(u"</div>")
        if difference.unified_diff:
            output_unified_diff(print_func, difference.unified_diff)
        for detail in difference.details:
            output_difference(detail, print_func, sources)
    except PrintLimitReached:
        logger.debug('print limit reached')
        raise
    finally:
        print_func(u"</div>", force=True)
def get_cell_data_cb(self, column, cell, model, piter, user_data=None):
    """Cell-data callback: compute markup and editability for a tree row.

    Header rows (languages) are rendered plain and non-editable; tool
    rows show the tool name plus its accelerator and can be renamed.
    """
    tool = model.get_value(piter, self.TOOL_COLUMN)
    # Idiom fix: identity comparison with None (`is`), not `==`.
    if tool is None or not isinstance(tool, Tool):
        if tool is None:
            label = _('All Languages')
        elif not isinstance(tool, GtkSource.Language):
            label = _('Plain Text')
        else:
            label = tool.get_name()
        markup = saxutils.escape(label)
        editable = False
    else:
        escaped = saxutils.escape(tool.name)
        if tool.shortcut:
            # Show the human-readable accelerator next to the tool name.
            key, mods = Gtk.accelerator_parse(tool.shortcut)
            label = Gtk.accelerator_get_label(key, mods)
            markup = '%s (<b>%s</b>)' % (escaped, label)
        else:
            markup = escaped
        editable = True
    cell.set_properties(markup=markup, editable=editable)
def _update_publish(cls, upd_root):
    """Apply an ifmap publish/update to the in-memory identity graph and
    return the corresponding <updateResult> element.

    `upd_root` holds either two children (identity + metadata) or three
    (identity, identity, link metadata).
    """
    subscribe_item = etree.Element('resultItem')
    subscribe_item.extend(deepcopy(upd_root))
    from_name = escape(upd_root[0].attrib['name'])
    if not from_name in cls._graph:
        cls._graph[from_name] = {'ident': upd_root[0], 'links': {}}
    if len(upd_root) == 2:
        # Property/metadata attached to a single identity; the xmlns
        # prefix is normalized to "contrail:".
        meta_name = re.sub("{.*}", "contrail:", upd_root[1][0].tag)
        link_key = meta_name
        link_info = {'meta': upd_root[1]}
        cls._graph[from_name]['links'][link_key] = link_info
    elif len(upd_root) == 3:
        # Link between two identities.
        meta_name = re.sub("{.*}", "contrail:", upd_root[2][0].tag)
        to_name = escape(upd_root[1].attrib['name'])
        link_key = '%s %s' % (meta_name, to_name)
        link_info = {'meta': upd_root[2], 'other': upd_root[1]}
        cls._graph[from_name]['links'][link_key] = link_info
        # reverse mapping only for strong refs
        # currently refs from same type to each other is weak ref
        # NOTE(review): from_type/to_type are computed but never used here.
        from_type = from_name.split(':')[1]
        to_type = to_name.split(':')[1]
        if not to_name in cls._graph:
            cls._graph[to_name] = {'ident': upd_root[1], 'links': {}}
        link_key = '%s %s' % (meta_name, from_name)
        link_info = {'meta': upd_root[2], 'other': upd_root[0]}
        cls._graph[to_name]['links'][link_key] = link_info
    else:
        raise Exception("Unknown ifmap update: %s" % (etree.tostring(upd_root)))
    subscribe_result = etree.Element('updateResult')
    subscribe_result.append(subscribe_item)
    return subscribe_result
def _get_add_xml(array_of_hash, overwrite=True):
    """
    Creates add XML message to send to Solr based on the array of
    hashes (documents) provided.

    :param overwrite: Newer documents will replace previously added
        documents with the same uniqueKey (default is True)
    """
    def _field_xml(key, value):
        # Only string values are XML-escaped; other types are emitted
        # via %s formatting, as before.
        if isinstance(value, basestring):
            value = escape(value)
        # NOTE(review): `key` is interpolated into the attribute without
        # escaping; field names are assumed to be XML-safe.
        return '<field name="%s">%s</field>' % (key, value)

    docs = []
    for doc_hash in array_of_hash:
        fields = []
        for key, value in doc_hash.items():
            # isinstance replaces the fragile `type(value) == type(list())`.
            if isinstance(value, list):
                # Multi-valued field: one <field> element per item.
                fields.extend(_field_xml(key, v) for v in value)
            else:
                fields.append(_field_xml(key, value))
        docs.append('<doc>%s</doc>' % ''.join(fields))
    # str.join avoids the quadratic string concatenation of the original.
    return '<add overwrite="%s">%s</add>' % (
        'true' if overwrite else 'false', ''.join(docs))
def task_label_column(self, node):
    """Build the Pango markup shown for a task `node` in the task tree."""
    str_format = "%s"
    if node.get_status() == Task.STA_ACTIVE:
        # we mark in bold tasks which are due today or as Now
        days_left = node.get_days_left()
        if days_left is not None and days_left <= 0:
            str_format = "<b>%s</b>"
        if self._has_hidden_subtask(node):
            # Grey out tasks whose subtasks are hidden by the filter.
            str_format = "<span color='%s'>%s</span>"\
                % (self.unactive_color, str_format)
    title = str_format % saxutils.escape(node.get_title())
    if node.get_status() == Task.STA_ACTIVE:
        # Append the (recursive) subtask count, if any.
        count = self.mainview.node_n_children(node.get_id(), recursive=True)
        if count != 0:
            title += " (%s)" % count
    elif node.get_status() == Task.STA_DISMISSED:
        title = "<span color='%s'>%s</span>" % (self.unactive_color, title)
    if self.config.get("contents_preview_enable"):
        # One-line, tag-stripped excerpt of the task contents.
        excerpt = saxutils.escape(node.get_excerpt(lines=1, strip_tags=True,
                                                   strip_subtasks=True))
        title += " <span size='small' color='%s'>%s</span>" \
            % (self.unactive_color, excerpt)
    return title
def whats_this_helper(desc, include_more_link=False):
    """
    A `What's this` text construction helper.

    If `include_more_link` is True then the text will include a
    `more...` link.
    """
    help_url = desc.help
    if not help_url:
        # Fall back to an internal help-search URL keyed by the widget id.
        help_url = "help://search?" + urlencode({"id": desc.id})

    sections = ["<h3>{}</h3>".format(escape(desc.name))]
    if desc.description:
        sections.append("<p>{}</p>".format(escape(desc.description)))
    if desc.long_description:
        # Only the first 100 characters of the long description are shown.
        sections.append("<p>{}</p>".format(escape(desc.long_description[:100])))
    if help_url and include_more_link:
        sections.append("<a href='{}'>more...</a>".format(escape(help_url)))
    return "\n".join(sections)
def append_text(self, text):
    """Append one message to the label, wrapping it in the widget's
    markup template (color, first line, remainder)."""
    # Split off the first line; everything after the first newline is
    # the remainder (empty when the text has no newline).
    first_line, remainder = (text + '\n').split('\n', 1)
    markup = self.text_markup % (self.foreground_color,
                                 escape(first_line),
                                 escape(remainder))
    self.text = self.text + "\n" + markup
    self.message_label.set_text(self.text)
    self.message_label.set_use_markup(True)
    self.message_label.show()
def tooltip_helper(desc):
    """Widget tooltip construction helper. """
    item_fmt = "<li>{name}</li>"
    sections = ["<b>{name}</b>".format(name=escape(desc.name))]
    if desc.project_name and desc.project_name != "Orange":
        # Third-party widgets advertise their originating project.
        sections[0] += " (from {0})".format(desc.project_name)
    if desc.description:
        sections.append("{0}".format(escape(desc.description)))
    if desc.inputs:
        sections.append("Inputs:<ul>{0}</ul>".format(
            "".join(item_fmt.format(name=inp.name) for inp in desc.inputs)))
    else:
        sections.append("No inputs")
    if desc.outputs:
        sections.append("Outputs:<ul>{0}</ul>".format(
            "".join(item_fmt.format(name=out.name) for out in desc.outputs)))
    else:
        sections.append("No outputs")
    return "<hr/>".join(sections)
def message_to_xmb(message):
    """Converts a single message object to a <msg> tag in the XMB format."""
    # TODO(lschumacher) handle plurals
    msg_id = message.id
    if isinstance(msg_id, tuple):
        msg_id = msg_id[0]
    if 'python-format' in message.flags:
        # Turn %(name)s placeholders into numbered <ph> elements; a
        # literal %% collapses to a single percent sign.
        pieces = []
        placeholder_index = 0
        for token in PYTHON_FORMAT_RE.split(msg_id):
            if token.startswith('%(') and token.endswith(')s'):
                name = token[2:-2]
                placeholder_index += 1
                pieces.append('<ph name="%s"><ex>%s</ex>%%%d</ph>'
                              % (name, name, placeholder_index))
            elif token == '%%':
                pieces.append('%')
            elif token:
                pieces.append(escape(token))
        xml_message = ''.join(pieces)
    else:
        xml_message = escape(msg_id)
    return '<msg desc=%s>%s</msg>' % (
        quoteattr(' '.join(message.user_comments)), xml_message)
def process(self, node):
    """ Process the given xml `node`: collect `todo` and `done` items. """
    if (
        isinstance(node, SKIPPED_ELEMENT_TYPES)
        or node.tag in SKIPPED_ELEMENTS
        or node.get("translation", "").strip() == "off"
        or node.tag == "attribute" and node.get("name") not in TRANSLATED_ATTRS
    ):
        # do not translate the contents of the node
        # The tail is detached before serializing so it is not emitted
        # twice; it is queued separately as translatable text.
        tail, node.tail = node.tail, None
        self.done(etree.tostring(node))
        self.todo(escape(tail or ""))
        return

    # process children nodes locally in child_trans
    child_trans = XMLTranslator(self.callback)
    child_trans.todo(escape(node.text or ""))
    for child in node:
        child_trans.process(child)

    if (child_trans.all_todo()
            and node.tag in TRANSLATED_ELEMENTS
            and not any(attr.startswith("t-") for attr in node.attrib)):
        # serialize the node element as todo
        self.todo(serialize(node.tag, node.attrib, child_trans.get_todo()),
                  child_trans.needs_trans)
    else:
        # complete translations and serialize result as done
        for attr in TRANSLATED_ATTRS:
            if node.get(attr):
                node.set(attr, self.process_text(node.get(attr)))
        self.done(serialize(node.tag, node.attrib, child_trans.get_done()))

    # add node tail as todo
    self.todo(escape(node.tail or ""))
def get(self):
    """Handle a set-top-box list request: record watch progress, then
    return the current playlist's "now playing" entry as XML."""
    bind_id = self.get_argument("bind_id")
    youtube_id = self.get_argument("youtube_id", "-1")
    time_watched = self.get_argument("time_watched", -1)
    logger.debug(
        "Set-top box list request received: bind_id: %s, youtube_id: %s, time_watched: %s",
        bind_id, youtube_id, time_watched,
    )
    try:
        # "-1" and "Title" are sentinel values meaning "nothing to record".
        if youtube_id != "-1" and youtube_id != "Title":
            Video.mark_watched(bind_id=bind_id, youtube_id=youtube_id,
                               time_watched=time_watched)
    except:
        # NOTE(review): bare except silently swallows all errors (including
        # programming errors) and only rolls back the DB session.
        dbutils.Session.rollback()
    try:
        container = Video.get_list(bind_id=bind_id, is_stb=True)
        # Resolve the actual stream URL and tag it so the STB treats it
        # as an mp4 resource.
        playback_url = fetch_yt_link(container["nowplaying"]["url"])
        playback_url = escape(playback_url + "&title=.mp4")
        container["nowplaying"]["url"] = escape(container["nowplaying"]["url"])
        container["nowplaying"]["title"] = escape(container["nowplaying"]["title"])
        container["nowplaying"]["youtube_id"] = escape(container["nowplaying"]["youtube_id"])
        container["nowplaying"]["playback_url"] = playback_url
    except:
        # NOTE(review): same bare-except pattern; failure yields an empty
        # response body below.
        dbutils.Session.rollback()
        container = []
    logger.info("timewatched: %s youtube_id:%s bind_id:%s", time_watched, youtube_id, bind_id)
    # self.content_type = 'text/xml' //HACK wtf why doesn't this work..?
    self._headers["Content-Type"] = "text/xml; charset=UTF-8"
    if container != []:
        self.write(dict2xml({"nowplaying": container["nowplaying"]}))
    dbutils.Session.remove()
    self.finish()
def _detailed_text(self, item):
    """Return an HTML rendering of `item`'s description.

    Installed items read the description from the remote entry when
    available, otherwise from the local distribution metadata.  The
    description is rendered with docutils when possible; otherwise (or
    on any rendering failure) it falls back to escaped preformatted
    text.
    """
    if isinstance(item, Installed):
        remote, dist = item
        if remote is None:
            description = get_dist_meta(dist).get("Description")
        else:
            description = remote.description
    else:
        description = item[0].description
    if docutils is not None:
        try:
            html = docutils.core.publish_string(
                trim(description),
                writer_name="html",
                settings_overrides={
                    "output-encoding": "utf-8",
                }
            ).decode("utf-8")
        except Exception:
            # BUG FIX: the fallback markup used "<pre>" as the closing tag;
            # it now properly closes with "</pre>".  (SystemMessage is an
            # Exception subclass, so both original handlers are covered.)
            html = "<pre>{}</pre>".format(escape(description))
    else:
        html = "<pre>{}</pre>".format(escape(description))
    return html
def xsams_source(source):
    """
    Yields the XML for an individual source in the XSAMS document.

    Arguments:
    source: an instance of the hitran_meta.models Source class.
    """
    yield '<Source sourceID="%s">' % (make_xsams_id('B', source.id),)
    author_list = source.authors.split(',')
    if source.note:
        yield '<Comments>%s</Comments>' % escape(source.note)
    yield '<Authors>'
    for author in author_list:
        # NOTE(review): author names are not XML-escaped, unlike the other
        # text fields here — confirm upstream data cannot contain markup.
        yield '<Author><Name>%s</Name></Author>' % author
    yield '</Authors>'
    yield make_mandatory_tag('Title', escape(source.title), '[This source does'
                             ' not have a title]')
    yield make_mandatory_tag('Category', source.source_type.xsams_category, '')
    # XXX what to do when the year is missing?
    yield make_mandatory_tag('Year', source.year, '2008')
    yield make_optional_tag('SourceName', source.journal)
    yield make_optional_tag('Volume', source.volume)
    yield make_optional_tag('PageBegin', source.page_start)
    yield make_optional_tag('PageEnd', source.page_end)
    yield make_optional_tag('ArticleNumber', source.article_number)
    # NOTE(review): urllib.quote raises if source.url is None — presumably
    # url is always a string here; confirm.
    yield make_optional_tag('UniformResourceIdentifier',
                            urllib.quote(source.url))
    yield make_optional_tag('DigitalObjectIdentifier', source.doi)
    yield '</Source>\n'
def ToTagUri(self):
    """Return a tag: URI (RFC 4151) identifying this entity for XML output.

    The URI is minted from the app's auth domain and today's date; the
    namespace-specific part is <kind>[<key>], e.g.
    tag:catsinsinks.googleapps.com,2006-08-29:Kitten[Fluffy].

    Raises a BadKeyError if this entity's key is incomplete.
    """
    if not self.has_id_or_name():
        raise datastore_errors.BadKeyError(
            'ToTagUri() called for an entity with an incomplete key.')
    app = saxutils.escape(self.app())
    kind = saxutils.escape(self.kind())
    key_repr = saxutils.escape(str(self))
    today = datetime.date.today().isoformat()
    return u'tag:%s.%s,%s:%s[%s]' % (
        app, os.environ['AUTH_DOMAIN'], today, kind, key_repr)
def update(self, resources, locale, domain):
    """Save resources by given locale in domain file"""
    # Source locale lives in values/, translations in values-<locale>/.
    if locale == self.SOURCE_LOCALE:
        path = "values/{}.xml".format(domain)
    else:
        path = "values-{}/{}.xml".format(locale, domain)
    filename = os.path.join(self.basepath, path)
    # NOTE(review): the handle is not closed on exception and encoded
    # bytes are written to a text-mode file — this is Python 2 style;
    # consider a `with` block if the file is modernized.
    xml = open(filename, 'w')
    # write xml header
    xml.write(XML_START)
    for resource in resources:
        if resource.is_plural:
            xml.write(
                XML_PLURALS_START.format(resource=resource).encode("utf-8")
            )
            for (quantity, text) in resource.plurals.items():
                # Write only nonempty plural forms
                if text.strip():
                    xml.write(
                        XML_PLURALS_ITEM.format(
                            quantity=quantity,
                            text=escape(text, APOS)
                        ).encode("utf-8")
                    )
            xml.write(XML_PLURALS_END)
        else:
            # quoteattr text in resource
            resource.message = escape(resource.text, APOS)
            xml.write(XML_STRING.format(resource=resource).encode("utf-8"))
    xml.write(XML_END)
    xml.close()
def new_link_item(self, source_item, source_channel, sink_item, sink_channel):
    """
    Construct and return a new :class:`.LinkItem`
    """
    def _name_of(channel):
        # A channel may be passed either as a plain string or as an
        # object exposing a `name` attribute.
        return channel if isinstance(channel, basestring) else channel.name

    link = items.LinkItem()
    link.setSourceItem(source_item)
    link.setSinkItem(sink_item)

    src_name = _name_of(source_channel)
    dst_name = _name_of(sink_channel)

    # Tooltip: "source → sink" with an arrow glyph.
    link.setToolTip(
        u"<b>{0}</b> \u2192 <b>{1}</b>".format(escape(src_name),
                                               escape(dst_name))
    )
    link.setSourceName(src_name)
    link.setSinkName(dst_name)
    link.setChannelNamesVisible(self.__channel_names_visible)
    return link
def branchentry(self, branch):
    """Append an HTML table row describing `branch` to the per-blocktype
    report, creating the header row on first sight of a block type."""
    # check if this branch must be checked, if not, recurse further
    if not NifSpell._branchinspect(self, branch):
        return True
    blocktype = branch.__class__.__name__
    reports = self.toaster.reports_per_blocktype.get(blocktype)
    if not reports:
        # start a new report for this block type
        row = "<tr>"
        row += "<th>%s</th>" % "file"
        row += "<th>%s</th>" % "id"
        for attr in branch._get_filtered_attribute_list(data=self.data):
            row += ("<th>%s</th>"
                    % escape(attr.displayname, self.ENTITIES))
        row += "</tr>"
        reports = [row]
        self.toaster.reports_per_blocktype[blocktype] = reports
    row = "<tr>"
    # NOTE(review): the file name is escaped without self.ENTITIES, unlike
    # every other cell here — confirm whether that is intentional.
    row += "<td>%s</td>" % escape(self.stream.name)
    # The Python object id serves as a stable-per-run block identifier.
    row += "<td>%s</td>" % escape("0x%08X" % id(branch), self.ENTITIES)
    for attr in branch._get_filtered_attribute_list(data=self.data):
        row += ("<td>%s</td>"
                % escape(dumpAttr(getattr(branch, "_%s_value_" % attr.name)),
                         self.ENTITIES))
    row += "</tr>"
    reports.append(row)
    # keep looking for blocks of interest
    return True
def w3c_validate(filename):
    """
    For index.html, the validation URL is
    http://validator.w3.org/check?uri=http%3A%2F%2Fusers.nccs.gov%2F~tpb%2Ftechint_olcf%2Findex.html&charset=%28detect+automatically%29&doctype=Inline&group=0&user-agent=W3C_Validator%2F1.3
    """
    # saxutils-style escape() is (ab)used here as a generic character
    # replacer to percent-encode the query-string components.
    entities = {':': '%3a', '/': '%2F', '(': '%28', ')': '%29'}
    validator = "http://validator.w3.org"
    host = "http://users.nccs.gov/"
    path = "~tpb/techint"
    uri = xml.escape("uri=%s%s/%s" % (host, path, filename), entities)
    charset = xml.escape("charset=(detect+automatically)", entities)
    doctype = "doctype=Inline"
    group = "group=0"
    agent = xml.escape("user-agent=W3C_Validator/1.3", entities)
    url = "%s/check?%s&%s&%s&%s&%s" % (validator, uri, charset, doctype,
                                       group, agent)
    # print url
    # Fetch the validator's report and save it locally.
    page = urllib2.urlopen(url)
    text = page.readlines()
    vname = "validation_%s" % filename
    h = open(vname, 'w')
    h.writelines(text)
    h.close()
    print("Validation output is in %s" % vname)
    assess_validation(vname)
def escape_html(input_file, output_file=os.getcwd() + '/'):
    """HTML-escape each line of `input_file` and save it with an
    '_escape' suffix, either next to the input (default) or at
    `output_file`."""
    # NOTE(review): the default for `output_file` is evaluated ONCE at
    # import time (os.getcwd() at module load) — a classic Python pitfall;
    # the comparison below re-reads the cwd, so the two can disagree if the
    # process changes directory.  Confirm intent before changing.
    f = file(input_file, 'r')  # Python 2 built-in `file`
    for line in f.xreadlines():
        if output_file == os.getcwd() + '/':
            # Default destination: "<cwd>/<input_file>_escape".
            save_result_file(escape(line, html_escape_dict),
                             output_file + input_file + '_escape')
        else:
            save_result_file(escape(line, html_escape_dict),
                             output_file + '_escape')
def set_personal_message(self, personal_message='', current_media=None, signature_sound=None):
    """Sets the new personal message

    @param personal_message: the new personal message
    @type personal_message: string"""
    cm = ''
    if current_media is not None:
        # MSNP CurrentMedia payload.  The literal "{0} - {1}" appears to be
        # part of the wire format (a display template expanded by the
        # receiving client), while the two %s slots carry the escaped
        # artist/title — TODO confirm against the protocol docs.
        cm ='\\0Music\\01\\0{0} - {1}\\0%s\\0%s\\0\\0' % \
            (xml_utils.escape(current_media[0]),
             xml_utils.escape(current_media[1]))
    if signature_sound is not None:
        signature_sound = xml_utils.escape(signature_sound)
        # NOTE(review): `ss` is built but never inserted into the payload
        # below — the signature sound is silently dropped; confirm.
        ss = '<SignatureSound>%s</SignatureSound>' % signature_sound
    message = xml_utils.escape(personal_message)
    pm = '<Data>'\
         '<PSM>%s</PSM>'\
         '<CurrentMedia>%s</CurrentMedia>'\
         '<MachineGuid>%s</MachineGuid>'\
         '</Data>' % (message, cm, self._client.machine_guid.upper())
    self._send_command('UUX', payload=pm)
    self._client.profile._server_property_changed("personal-message",
                                                  personal_message)
    if current_media is not None:
        self._client.profile._server_property_changed("current-media",
                                                      current_media)
def as_xml (self, encoding="UTF-8"):
    """(str): str
    Return the XML representation of the collection.

    The 'encoding' parameter will be substituted into the XML document's
    header, but it's the caller's responsibility to encode the string
    returned from this method.
    """
    from qel import QEL_HEADER, QEL_FOOTER
    pieces = [QEL_HEADER % encoding]
    # Optional header fields, emitted only when set.
    for attr in ('title', 'editor', 'description', 'copyright'):
        value = getattr(self, attr)
        if value:
            pieces.append(' <%s>%s</%s>\n' % (attr, escape(value), attr))
    if self.license:
        pieces.append(' <license>')
        for part in self.license:
            # License parts are either raw strings (escaped) or objects
            # that know how to serialize themselves.
            if isinstance(part, str):
                pieces.append(escape(part))
            else:
                pieces.append(part.as_xml())
        pieces.append('\n </license>\n')
    # Then every quotation in the collection, in order.
    for quotation in self:
        pieces.append(quotation.as_xml())
    pieces.append(QEL_FOOTER)
    return ''.join(pieces)
def listToRokuXml(listTag, itemTag, l):
    """Serialize `l` as a Roku XML list element named `listTag`.

    Dict entries are rendered via dictionaryToRokuXml under `itemTag`;
    entries of any other type are silently skipped.
    """
    tag = saxutils.escape(listTag)
    body = ''.join(dictionaryToRokuXml(itemTag, entry)
                   for entry in l if type(entry) is dict)
    return '<%s>\n%s</%s>\n' % (tag, body, tag)
def _build_health_report(incarnation, container_id, role_instance_id, status, substatus, description):
    """Build the XML health report payload for the wire server.

    '&', '<' and '>' in substatus/description are XML-escaped; a
    <Details> section is emitted only when substatus is provided.
    """
    description = saxutils.escape(ustr(description))
    if substatus is None:
        detail = u''
    else:
        substatus = saxutils.escape(ustr(substatus))
        detail = (u"<Details>"
                  u"<SubStatus>{0}</SubStatus>"
                  u"<Description>{1}</Description>"
                  u"</Details>").format(substatus, description)
    report = (u"<?xml version=\"1.0\" encoding=\"utf-8\"?>"
              u"<Health "
              u"xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\""
              u" xmlns:xsd=\"http://www.w3.org/2001/XMLSchema\">"
              u"<GoalStateIncarnation>{0}</GoalStateIncarnation>"
              u"<Container>"
              u"<ContainerId>{1}</ContainerId>"
              u"<RoleInstanceList>"
              u"<Role>"
              u"<InstanceId>{2}</InstanceId>"
              u"<Health>"
              u"<State>{3}</State>"
              u"{4}"
              u"</Health>"
              u"</Role>"
              u"</RoleInstanceList>"
              u"</Container>"
              u"</Health>"
              u"").format(incarnation, container_id, role_instance_id,
                          status, detail)
    return report
def as_xml(self):
    """Convert instance into QEL, pretty-printing it.
    """
    # Optional attributes of the <quotation> element.
    attrs = ''
    if self.id is not None:
        attrs += ' id="%s"' % escape(self.id)
    if self.date is not None:
        attrs += ' date="%s"' % escape(self.date)
    if self.type is not None:
        attrs += ' type="%s"' % escape(','.join(self.type))

    out = [" <quotation%s>\n" % attrs]
    for paragraph in self.text:
        out.append(" <p>\n")
        out.append(format_paragraph_as_xml(paragraph, 6*' '))
        out.append(" </p>\n")
    # Author and source are optional child elements.
    for field in ('author', 'source'):
        value = getattr(self, field)
        if value is not None:
            out.append((6*' ') + value.as_xml() + '\n')
    if self.note:
        out.append(" <note>\n")
        for paragraph in self.note:
            out.append(" <p>\n")
            out.append(format_paragraph_as_xml(paragraph, 8*' '))
            out.append(" </p>\n")
        out.append(" </note>\n")
    out.append(" </quotation>\n")
    return ''.join(out)
def html_elements(fullfilename, title, caption, identifier):
    """Return the HTML snippet for one gallery item.

    NOTE(review): `identifier` is interpolated without escaping, unlike
    the other fields — callers must supply an HTML-safe value.
    """
    return ('<div class="item"><img class="content" src="{0}" title="{1}" '
            'identifier="{2}"/> <div class="caption">{3}</div> </div>').format(
                escape(fullfilename), escape(title), identifier,
                escape(caption))
def on_display_text(self):
    """Render the episode shownotes, via the WebKit HTML view when
    available, otherwise into the plain GTK text buffer."""
    # Now do the stuff that takes a bit longer...
    heading = self.episode.title
    subheading = _('from %s') % (self.episode.channel.title)
    description = self.episode.description
    if self.have_webkit:
        global SHOWNOTES_HTML_TEMPLATE
        # Get the description - if it looks like plaintext, replace the
        # newline characters with line breaks for the HTML view
        description = self.episode.description
        if '<' not in description:
            description = description.replace('\n', '<br>')
        # NOTE(review): description is deliberately NOT escaped — it may
        # already contain HTML; heading/subheading are escaped.
        args = (
            saxutils.escape(heading),
            saxutils.escape(subheading),
            description,
        )
        # Base URL for relative resources inside the shownotes.
        url = os.path.dirname(self.episode.channel.url)
        self.htmlview.load_html_string(SHOWNOTES_HTML_TEMPLATE % args, url)
    else:
        self.b.create_tag('heading', scale=pango.SCALE_LARGE,
                          weight=pango.WEIGHT_BOLD)
        self.b.create_tag('subheading', scale=pango.SCALE_SMALL)
        self.b.insert_with_tags_by_name(self.b.get_end_iter(), heading,
                                        'heading')
        self.b.insert_at_cursor('\n')
        self.b.insert_with_tags_by_name(self.b.get_end_iter(), subheading,
                                        'subheading')
        self.b.insert_at_cursor('\n\n')
        # Plain-text fallback: strip any HTML from the description.
        self.b.insert(self.b.get_end_iter(),
                      util.remove_html_tags(description))
        self.b.place_cursor(self.b.get_start_iter())
def thank_after_registered(self, name, token, tmp_user_id, user_id, lang):
    """Build the account-activation email body.

    Japanese by default; English when `lang == "en"`.  `token`,
    `tmp_user_id` and `user_id` are embedded (escaped) into the
    activation URL.
    """
    url = u"https://sh-hringhorni.appspot.com/activate?t=" + escape(token) + u"&i=" + escape(tmp_user_id) + "&u=" + escape(user_id) + u"\n"
    # Default (Japanese) body.
    body = escape(name) + u"さん、\nShourへようこそ。\n\nShourにあなたのメールアドレスが仮登録され、アカウントが発行されました。下記のURLをクリックしてアカウントを有効化してください。\n" + url + u"\nShourは、ちょっとした時間を親しい間柄の人との楽しいひと時に変える新感覚予定共有アプリです。\n\n「今度お茶しよう」\n「久しぶりに会いたいね」\n「また語りましょう」\nそんな友達との約束を、Shourで是非実現してみてください。\n\n日々の生活の中にあるスキマ時間が、シャワータイムのようなリフレッシュのひと時になるよう、Shourはお手伝いします。\n\nShour開発チーム一同\n\n※このメールに覚えがない場合、誤って送信された可能性がございます。大変お手数ですが、破棄してください。"
    if lang:
        # Currently only "en" triggers a non-Japanese body.
        if lang == "en":
            body = u"Hi," + escape(name) +".\nYou are the newest member of \"Shour\".\n\nClick here to verify your email address. \n" + url + u"\nWith shour, you can enjoy a pleasant time with your close friends in your spare time. Shour will help you spent refreshing moments like shower time. Please just try it! We hope you enjoy \"SHaring your hOUR\"!! \n\nDevelopment team of Shour\n\nNotice: If you do not know any idea about this email, this may be miscarriaged to wrong email address. We appologize for this inconvenience and thank you for delete this email."
    return body
def setrenderer():
    # tell the controlpoint which renderer is current
    # (web2py controller, Python 2: fetches renderer metadata and
    # now-playing status, then returns javascript for the browser to eval)
    print "---- setrenderer -----------------------------------"
    print "renderertitle: " + str(request.vars.renderertitle)
    print "renderertype: " + str(request.vars.renderertype)
    print "renderertarget: " + str(request.vars.renderertarget)
    print "queuetarget: " + str(request.vars.queuetarget)
    print "queuedata: " + str(request.vars.queuedata)
    print "queuechain: " + str(request.vars.queuechain)
    ptitle = escape(request.vars.renderertitle, url_escape_entities)
    ptype = request.vars.renderertype
    ptarget = request.vars.renderertarget
    qtarget = request.vars.queuetarget
    qdata = request.vars.queuedata
    qchainparams = request.vars.queuechain
    # Renderer lookup key is "<type>::<title>".
    pentry = ptype + '::' + ptitle
    print "entry: " + str(pentry)
    # get the meta data for this renderer
    datastring = urllib.urlopen('http://' + ip_address + ':50101/data/rendererData?data=' + pentry).read()
    datadict = unwrap_data(datastring)
    print "rendererData: " + str(datadict)
    print
    dataout = ''
    queueentry = ''
    for item in datadict:
        colonpos = item.find('::')
        id = item[:colonpos]
        text = item[colonpos + 2:]
        if id == 'QUEUE':
            # this is the queue entry name
            dataout += "update_queueentry('" + text + "');"
            queueentry = text
    # get the now playing data for this renderer
    datastring = urllib.urlopen('http://' + ip_address + ':50101/data/rendererPoll?data=' + pentry).read()
    datadict = unwrap_data(datastring)
    print "rendererPoll: " + str(datadict)
    print
    pollout = formatrendererstatus(datadict)
    # create queue browse script if appropriate
    queuescript = ''
    if queueentry != '':
        # Queue entry format: "<id>::<type>::<menu>::<text>".
        qentries = queueentry.split('::')
        qid = qentries[0]
        qtype = qentries[1]
        qmenu = qentries[2]
        qtext = qentries[3]
        qoption = 'tree'
        qstring = ''
        qoperator = ''
        qparams = [
            'paramtitle=' + qtext,
            'paramtype=' + qtype,
            'paramtarget=' + qtarget,
            'paramid=' + qid,
            'paramoption=' + qoption,
            'parammenutype=' + qmenu,
            'searchstring=' + qstring,
            'searchoperator=' + qoperator,
            'browsedata=' + qdata,
            'chainparams=' + qchainparams
        ]
        queuescript1 = "ajax2('getdata', " + str(qparams) + ", ':eval');"
        # adjust qdata for subsequent calls - pass 0 as sequence
        qdata = '0' + qdata[1:]
        qparams = [
            'paramtitle=' + qtext,
            'paramtype=' + qtype,
            'paramtarget=' + qtarget,
            'paramid=' + qid,
            'paramoption=' + qoption,
            'parammenutype=' + qmenu,
            'searchstring=' + qstring,
            'searchoperator=' + qoperator,
            'browsedata=' + qdata,
            'chainparams=' + qchainparams
        ]
        queuescript2 = "ajax2('getdata', " + str(qparams) + ", ':eval');"
        queuescript += queuescript1 + "update_queuecall(\"" + queuescript2 + "\");"
    else:
        # need to clear queue from html
        queuescript = "jQuery('#" + qtarget + "').html('');"
    # print "setrenderer return: "
    # print "dataout: " + str(dataout)
    # print "pollout: " + str(pollout)
    # print "queuescript: " + str(queuescript)
    allout = dataout + pollout + queuescript
    # print "allout: " + str(allout)
    return allout
for opinions in sentence.findall('Opinions'): o.write('\t\t\t\t<Opinions>\n') for opinion in opinions.findall('Opinion'): o.write( '\t\t\t\t\t<Opinion category="%s" polarity="%s" from="%s" to="%s"/>\n' % (fix(opinion.get('category')), opinion.get('polarity'), opinion.get('from'), opinion.get('to'))) o.write('\t\t\t\t</Opinions>\n') o.write('\t\t\t</sentence>\n') o.write('\t\t</sentences>\n') o.write('\t</Review>\n') o.write('</Reviews>') fix = lambda text: escape(text.encode('utf8')).replace('\"', '"') '''Simple fix for writing out text.''' def load_lexicon(lex_type, b): '''Load each category's lexicon defined by its type.''' #entity lexica food = [] drinks = [] service = [] ambience = [] location = [] restaurant = [] #attribute lexica
def body(self):
    """Return the message body with XML special characters escaped."""
    raw = self._body
    return escape(raw)
def return_document(directories, files, error, type, after_data, op, auth_add, user, writer):
    """Write a myemsl readdir/stat XML document to `writer`, listing
    directories and files, plus auth tokens when `auth_add` is set."""
    writer.write("<?xml version=\"1.0\"?>\n")
    tag = "unknown"
    if op == "readdir" or op == "stat":
        tag = op
    else:
        error = "Unknown op type."
    writer.write("<myemsl-%s version=\"1.0.0\">\n" % (tag))
    if error:
        writer.write(" <error message=\"%s\"/>\n" % (saxutils.escape(error)))
    else:
        #FIXME only put this in if not already later
        #This is not right. Data seen and later != 0?
        if not after_data and type == DOCUMENT_FILTER_ARGS:
            writer.write(" <dir name=\"-later-\" type=\"%s\"/>\n" % (DOCUMENT_FILTER_ARGS))
        if directories:
            for d in directories:
                tmptype = type
                # The special 'data' directory switches to the data type.
                if d["name"] == 'data' and type == DOCUMENT_FILTERS:
                    tmptype = DOCUMENT_DATA
                name = ""
                if op != "stat":
                    name = " name=\"%s\"" % (saxutils.escape(str(d["name"])))
                writer.write(" <dir%s type=\"%s\"/>\n" % (name, tmptype))
        if files:
            #FIXME make this configurable.
            items_per_auth_token = 100
            token_offset = 0
            auth_tokens = []
            auth_items = []
            for f in files:
                auth_str = ""
                if auth_add:
                    auth_str = " authidx=\"%s\"" % (token_offset)
                name = ""
                if op != "stat":
                    name = " name=\"%s\"" % (saxutils.escape(str(f["name"])))
                #FIXME remove location once item server is fully in place.
                writer.write(
                    " <file%s location=\"%s\" itemid=\"%s\" size=\"%s\"%s/>\n"
                    % (name, saxutils.escape(str(
                        f["location"])), f["itemid"], f["size"], auth_str))
                if auth_add:
                    auth_items.append(f["itemid"])
                    if len(auth_items) >= items_per_auth_token:
                        # NOTE(review): `user_id` is not defined in this scope
                        # (the parameter is `user`) — this branch would raise
                        # NameError; confirm the intended identity source.
                        auth_tokens.append(
                            myemsl.token.simple_items_token_gen(
                                auth_items, person_id=user_id))
                        auth_items = []
                        token_offset += 1
        if auth_add:
            # NOTE(review): `auth_tokens`/`auth_items` are only bound inside
            # the `if files:` block above, and the final token is produced by
            # auth_sign() while earlier ones use simple_items_token_gen() —
            # both look suspicious; confirm before refactoring.
            if files and len(auth_items) > 0:
                auth_tokens.append(auth_sign(auth_items))
            writer.write(" <auth>\n")
            for t in auth_tokens:
                writer.write(" <token>%s</token>\n" % (t))
            writer.write(" </auth>\n")
    writer.write("</myemsl-%s>\n" % (tag))
def save(self):
    """
    Saves the current BookList to the associated file.

    Serializes every book in self.bookmap (sorted by name so archived
    copies diff cleanly) as XML, including each item's options, style,
    and the book's paper/format settings.
    """
    with open(self.file, "w", encoding="utf-8") as b_f:
        b_f.write("<?xml version=\"1.0\" encoding=\"utf-8\"?>\n")
        b_f.write('<booklist>\n')
        for name in sorted(self.bookmap):  # enable a diff of archived copies
            book = self.get_book(name)
            dbname = escape(book.get_dbname())
            b_f.write(' <book name="%s" database="%s">\n' % (escape(name), dbname))
            for item in book.get_item_list():
                # NOTE(review): item names are written unescaped here —
                # confirm they can never contain XML-special characters.
                b_f.write(' <item name="%s" trans_name="%s">\n'
                          % (item.get_name(), item.get_translated_name()))
                options = item.option_class.handler.options_dict
                for option_name in sorted(options.keys()):  # enable a diff
                    option_value = options[option_name]
                    if isinstance(option_value, (list, tuple)):
                        # List options are written as one <option> with
                        # numbered <listitem> children.
                        b_f.write(' <option name="%s" value="" length="%d">\n'
                                  % (escape(option_name), len(options[option_name])))
                        for list_index in range(len(option_value)):
                            option_type = type_name(option_value[list_index])
                            value = escape(str(option_value[list_index]))
                            # BUGFIX: was replace('"', '"') — a no-op that left
                            # raw double quotes inside an XML attribute value.
                            value = value.replace('"', '&quot;')
                            b_f.write(' <listitem number="%d" type="%s" value="%s"/>\n'
                                      % (list_index, option_type, value))
                        b_f.write(' </option>\n')
                    else:
                        option_type = type_name(option_value)
                        value = escape(str(option_value))
                        # BUGFIX: same no-op quote replacement as above.
                        value = value.replace('"', '&quot;')
                        b_f.write(' <option name="%s" type="%s" value="%s"/>\n'
                                  % (escape(option_name), option_type, value))
                b_f.write(' <style name="%s"/>\n' % item.get_style_name())
                b_f.write(' </item>\n')
            if book.get_paper_name():
                b_f.write(' <paper name="%s"/>\n' % book.get_paper_name())
            if book.get_orientation() is not None:  # 0 is legal
                b_f.write(' <orientation value="%s"/>\n' % book.get_orientation())
            if book.get_paper_metric() is not None:  # 0 is legal
                b_p_metric = book.get_paper_metric()
                # Normalize bool to int so the file doesn't contain True/False.
                if isinstance(b_p_metric, bool):
                    b_p_metric = int(b_p_metric)
                b_f.write(' <metric value="%s"/>\n' % b_p_metric)
            if book.get_custom_paper_size():
                size = book.get_custom_paper_size()
                b_f.write(' <size value="%f %f"/>\n' % (size[0], size[1]))
            if book.get_margins():
                for pos in range(len(book.get_margins())):
                    b_f.write(' <margin number="%s" value="%f"/>\n'
                              % (pos, book.get_margin(pos)))
            if book.get_format_name():
                b_f.write(' <format name="%s"/>\n' % book.get_format_name())
            if book.get_output():
                b_f.write(' <output name="%s"/>\n' % escape(book.get_output()))
            b_f.write(' </book>\n')
        b_f.write('</booklist>\n')
def getdata():
    """web2py controller action: fetch one page of browse data from the
    local data server (port 50101), render it as a nested <ul> tree, and
    return a JavaScript string that injects it into the page.  Chains a
    further ajax2() call if more pages remain.

    Python 2 code (print statements, integer division).
    """
    # get data from the server relating to the parameter values
    print "---- getdata -----------------------------------"
    print "paramtitle: " + str(request.vars.paramtitle)
    print "paramtype: " + str(request.vars.paramtype)
    print "paramtarget: " + str(request.vars.paramtarget)
    print "paramid: " + str(request.vars.paramid)
    print "paramoption: " + str(request.vars.paramoption)
    print "parammenutype: " + str(request.vars.parammenutype)
    print "searchstring: " + str(request.vars.searchstring)
    print "searchoperator: " + str(request.vars.searchoperator)
    print "browsedata: " + str(request.vars.browsedata)
    print "chainparams: " + str(request.vars.chainparams)
    # Title and search string are user text and must be URL-entity escaped.
    ptitle = escape(request.vars.paramtitle, url_escape_entities)
    ptype = request.vars.paramtype
    ptarget = request.vars.paramtarget
    pid = request.vars.paramid
    poption = request.vars.paramoption
    pmenu = request.vars.parammenutype
    psearch = escape(request.vars.searchstring, url_escape_entities)
    poperator = request.vars.searchoperator
    pdata = request.vars.browsedata
    pchain = request.vars.chainparams
    # Optional server id/type pair — both must be present to be used.
    s_vars = True
    if 's_id' in request.vars.keys():
        ps_id = request.vars.s_id
    else:
        s_vars = False
    if 's_type' in request.vars.keys():
        ps_type = request.vars.s_type
    else:
        s_vars = False
    s_name = True
    if 'searchname' in request.vars.keys():
        searchname = request.vars.searchname
    else:
        s_name = False
    # browsedata is "sequence,start,count"
    datas = pdata.split(',')
    dataseq = int(datas[0])
    datastart = int(datas[1])
    datacount = int(datas[2])
    # format request entry
    pentry = pid + '::' + ptype + '::' + pmenu + '::' + ptitle
    # append server id and type if passed
    if s_vars == True:
        pentry += '::' + ps_id + '::' + ps_type
    pentry += '::' + pdata
    # append search vars if present
    if psearch != "":
        pentry += ':::' + psearch
        if poperator != "":
            pentry += '::' + poperator
    print "entry: " + str(pentry)
    gotdata = False
    while gotdata == False:
        # get data from the server; poll until it stops answering NOTREADY
        datastring = urllib.urlopen('http://' + ip_address + ':50101/data/getData?data=' + pentry).read()
        datadict = unwrap_data(datastring)
        # check whether we have received any data
        if datadict[0].startswith('NOTREADY'):
            time.sleep(0.3)
        else:
            gotdata = True
    # remove any message
    messagescript = ''
    message = get_message(datadict)
    if message != None:
        messagescript = 'setmessagebar("' + message + '");'
    # remove any return totals (must be present)
    # NOTE(review): messagescript is unconditionally reset here, discarding
    # the server message captured above — confirm this is intentional.
    messagescript = ''
    ret = get_return(datadict)
    rets = ret.split(':')
    retcount = int(rets[0])
    rettotal = int(rets[1])
    newtotal = datastart + retcount
    # special case - reset dataseq if it was called with zero
    if dataseq == 0:
        dataseq = 1
    # calc number of calls needed (Python 2 integer division)
    if dataseq == 1:
        recallcount = (rettotal / datacount) + 1
    # decide whether we want to separate (alphabetic A/B/C grouping)
    chainparams = pchain.split(',')
    chainseparate = chainparams[0]
    if chainseparate == '1':
        separate = True
    else:
        separate = False
    currentletter = ''
    if separate == True:
        currentletter = chainparams[1]
        currenttarget = chainparams[2]
    if separate == True and dataseq == 1 and rettotal < 54:
        # not enough entries to separate - reset separate
        # NOTE(review): '==' here is a comparison with no effect — almost
        # certainly meant to be an assignment (chainseparate = '0').
        chainseparate == '0'
        separate = False
    message = "Returned " + str(newtotal) + " of " + str(rettotal)
    messagescript += 'setmessagebar("' + message + '");'
    # format the data received from the server as an unordered list
    out = ''
    firstsep = False
    if dataseq == 1:
        firstsep = True
        if separate == False:
            out += '<ul type="none" id="navigation">'
    foldercount = 0
    searchitems = []
    multipletargets = ''
    allletters = []
    lettercount = 0
    # TODO: simplify all these booleans
    prevseparate = False
    firstseparate = True
    for item in datadict:
        extraentry = None
        rementry = item
        if '::::' in rementry:
            # this entry contains extra details
            entries = rementry.split('::::')
            rementry = entries[0]
            extraentry = entries[1].split('::')
        if ':::' in rementry:
            # this entry contains search criteria
            entries = rementry.split(':::')
            rementry = entries[0]
            searchentry = entries[1].split('::')
            searchtype = searchentry[0]
            if len(searchentry) >= 2:
                searchcriteria = searchentry[1]
            else:
                searchcriteria = None
            if len(searchentry) == 3:
                searchoperators = searchentry[2]
            else:
                searchoperators = None
        # must be a base entry: id::type::menu::text
        entry = rementry.split('::')
        id = entry[0]
        sid = ''
        if '|||' in id:
            # id may carry a secondary id after '|||'
            entries = id.split('|||')
            id = entries[0]
            sid = entries[1]
        type = entry[1]
        menu = entry[2]
        text = entry[3]
        # TODO: fix this properly
        ctext = text.replace('&colon;', ':')
        # s_id = entry[4]
        # s_type = entry[5]
        target = 'target' + str(id)
        atarget = '"atarget' + id + '"'
        ref = '?' + target
        if separate == True:
            newbreak = False
            # check whether we need to separate
            thisletter = ctext[:1].upper()
            if thisletter.isalpha():
                if thisletter != currentletter:
                    currentletter = thisletter
                    newbreak = True
            else:
                # all non-alphabetic entries are grouped under '#'
                if currentletter == '':
                    currentletter = '#'
                    newbreak = True
            # if newbreak == False and firstseparate == True and currenttarget != '':
            if newbreak == False and firstseparate == True:
                # this is a continuation of a previous separate - need to indent
                if currenttarget == '':
                    out = '<ul>' + out
                else:
                    out = '<li><span id="' + currenttarget + '"><ul>' + out
                firstseparate = False
            if newbreak == True:
                # NOTE(review): the nesting of this close-previous-group logic
                # was reconstructed from collapsed source — verify against the
                # original file.
                if firstsep == False:
                    if prevseparate == True:
                        out += '</ul>'
                        out += '</li>'
                        prevseparate = False
                    # it's possible that there isn't a preceding <li> entry in this pass (will be in a previous pass)
                    # - if so we need to add one otherwise the replace will remove the </li> we are about to add
                    # TODO: only check this once
                    if out.find('<li') == -1:
                        out = '<li><ul>' + out
                        prevseparate = True
                    out += '</ul>'
                    out += '</li>'
                else:
                    firstsep = False
                allletters.append(currentletter)
                lettercount += 1
                # Synthesize a letter-header <li> for the new group.
                l_target = 'target' + '__' + str(lettercount)
                l_atarget = '"atarget' + '__' + str(lettercount) + '"'
                l_menu = 'NONE'
                l_type = 'C'
                l_text = currentletter
                l_icon = '<span class="ui-icon ui-icon-minus" style="float:left"></span>'
                # l_play = ' class="dummy"'
                l_play = ''
                out += '<li tree="open" visited="y"' + l_play + '><span type="' + l_type + '"><a id=' + l_atarget + ' menu="' + l_menu + '" type="' + l_type + '">' + l_icon + l_text + '</a></span><span id="s' + l_target + '"></span><span id="' + l_target + '">'
                out += '<ul type="none" id="navigation">'
            else:
                l_target = 'target' + '__' + str(lettercount)
        # Pick the icon/behaviour for this entry type:
        # T=track, N=non-playable, C=container, A=album, B=artist, S=search.
        play = ''
        search = ''
        if type == 'T':
            icon = '<span style="float:left"></span><img src="/sonospy/static/note.png">'
            play = ' class="play"'
        if type == 'N':
            icon = '<span style="float:left"></span><img src="/sonospy/static/block.png">'
            play = ' class="dummy"'
        elif type == 'C':
            # icon = '<span class="ui-icon ui-icon-plus" style="float:left"></span><img src="">'
            #TODO: check whether we need a null image
            icon = '<span class="ui-icon ui-icon-plus" style="float:left"></span><span class="ui-icon ui-icon-triangle-1-ne" style="float:right"></span>'
        elif type == 'A':
            icon = '<span class="ui-icon ui-icon-plus" style="float:left"></span><img src="/sonospy/static/album.png">'
        elif type == 'B':
            icon = '<span class="ui-icon ui-icon-plus" style="float:left"></span><img src="/sonospy/static/artist.png">'
        elif type == 'S':
            icon = '<span style="float:left"></span><img src="/sonospy/static/search.png">'
            search = ' searchtype="' + searchtype + '"'
            if text != 'ALL':
                searchitems.append(id + "::" + text)
            else:
                # have a multiple search option - pass the other options through
                c = 1
                st = ''
                for si in searchitems:
                    itemname = si.split("::")[1]
                    multipletargets += '<span id="' + target + '__' + str(c) + '" sname="' + itemname + '"></span>'
                    st += si + '__'
                    c += 1
                search += ' searchtype2="' + st[:-2] + '"'
            if searchcriteria != None:
                search += ' searchcriteria="' + searchcriteria + '"'
            if searchoperators != None:
                search += ' searchoperators="' + searchoperators + '"'
        # Optional extra details (creator/album/art) appended to the label.
        extras = ''
        extracreator = ''
        extraalbum = ''
        extraart = ''
        if extraentry != None:
            for ex in extraentry:
                if ex.startswith('creator='):
                    extracreator = ex[8:]
                    extras += '<span class="extra"> [' + extracreator + ']</span>'
                elif ex.startswith('album='):
                    extraalbum = ex[6:]
                    extras += '<span class="extra"> [' + extraalbum + ']</span>'
                elif ex.startswith('art='):
                    extraart = ex[4:]
            if type == 'C':
                # this works at the moment because the only container with extras is album
                insertalbum(id, ctext, extracreator, extraart, item)
        out += '<li tree="closed"' + play + search + '><span type="' + type + '"><a id=' + atarget + ' menu="' + menu + '" type="' + type + '" sid="' + sid + '">' + icon + ctext + extras + '</a></span><span id="s' + target + '"></span><span id="' + target + '"></span>' + multipletargets + '</li>'
        foldercount += 1
    # check if we received all entries
    chainscript = ''
    iconscript = ''
    if newtotal < rettotal:
        # there are more entries so we want to chain another ajax call
        # update the data values
        if (rettotal - newtotal) < datacount:
            datacount = rettotal - newtotal
        nextdata = str(dataseq + 1) + ',' + str(newtotal) + ',' + str(datacount)
        # update the target
        target_base = ptarget.split('-')[0]
        nexttarget = target_base + '-' + str(dataseq + 1)
        # check if we need to close a separate
        if separate == True:
            out += '</ul>'
            out += '</span></li>'
        # create targets for remaining calls
        if dataseq == 1:
            for call in range(recallcount):
                calltarget = target_base + '-' + str(dataseq + call + 1)
                out += '<span id="' + calltarget + '"></span>'
        # set chain params
        chainparams = chainseparate + ','
        if chainseparate == '1':
            chainparams += currentletter
            # add letter continuation target
            du = l_target.split('__')
            if len(du) == 1:
                cont = '1'
            else:
                cont = str(int(du[1]) + 1)
            chainparams += ',' + du[0] + '__' + cont
        # call again - use call that doesn't use form fields in case user is making another call
        params = ['paramtitle=' + ptitle, 'paramtype=' + ptype, 'paramtarget=' + nexttarget, 'paramid=' + pid, 'paramoption=' + poption, 'parammenutype=' + pmenu, 'searchstring=' + psearch, 'searchoperator=' + poperator, 'browsedata=' + nextdata, 'chainparams=' + chainparams]
        chainscript += "ajax2('getdata', " + str(params) + ", ':eval');"
    else:
        # there are no more browse entries
        # finalise separator
        if separate == True:
            out += '</ul>'
            out += '</li>'
        # finalise list
        out += '</ul>'
    # create a script to post process the server data icon after updating
    if poption == 'tree':
        iconscript = "jQuery('.clicker').attr('class', 'ui-icon ui-icon-minus').removeClass('clicker');"
    elif poption == 'accord':
        iconscript = "jQuery('.clicker').removeClass('ui-icon-refresh').addClass('ui-icon-triangle-1-s').removeClass('clicker');"
    # return the scripts we want to run:
    # set the target html to be the list
    # call the post processing script
    # print "ptarget: " + str(ptarget)
    # print "out: " + str(out)
    height = "setheight();"
    if dataseq <= 1 or separate == False:
        # prepend name separator if specified
        if s_name:
            out = searchname + out
        return "eval('" + messagescript + "');jQuery('#" + ptarget + "').html('" + out + "');" + chainscript + iconscript + height
    else:
        return "eval('" + messagescript + "');jQuery('#" + ptarget + "').replaceWith('" + out + "');" + chainscript + iconscript + height
def getrootdata():
    """web2py controller action: fetch context menus and root entries for
    the selected UPnP server from the local data server (port 50101) and
    return JavaScript that builds the accordion UI and installs the menus.

    Python 2 code (print statements).
    """
    # get the root data for the selected UPnP server
    print "---- getrootdata -----------------------------------"
    print "paramtitle: " + str(request.vars.paramtitle)
    print "paramtype: " + str(request.vars.paramtype)
    print "paramtarget: " + str(request.vars.paramtarget)
    ptitle = escape(request.vars.paramtitle, url_escape_entities)
    ptype = request.vars.paramtype
    ptarget = request.vars.paramtarget
    pentry = ptype + '::' + ptitle
    print "entry: " + str(pentry)
    # first get all the context menus for this server
    datastring = urllib.urlopen('http://' + ip_address + ':50101/data/rootMenus?data=' + pentry).read()
    datadict = unwrap_data(datastring)
    # format the menus - create Javascript functions to load them
    # (we use individual functions as only the first one works if
    # we concatenate them all together (at least in Firefox))
    # Note that the first set is not a menu but the default action(s) for double click
    menucount = int(datadict[0])
    defaultscript = ''
    defaultscriptname = ''
    menuscripts = []
    menuscripttext = ''
    i = 1
    # process default option(s): a flat list of (count, name, id/text pairs)
    entrycount = int(datadict[i])
    defaultscriptname = datadict[i + 1]
    i += 2
    defaultscript += 'function ' + defaultscriptname + '() {'
    for e in range(entrycount):
        menuid = datadict[i]
        menutext = datadict[i + 1]
        i += 2
        defaultscript += 'processmenu("' + menuid + '");'
    defaultscript += '};'
    menuscripts.append(defaultscript)
    # process the menus
    for m in range(1, menucount):
        entrycount = int(datadict[i])
        menuname = datadict[i + 1]
        i += 2
        menuscripttext += 'function ' + menuname + '(id) {'
        menuscripttext += ' var menu = ['
        for e in range(entrycount):
            menuid = datadict[i]
            menutext = datadict[i + 1]
            i += 2
            # SEP/SEP marks a visual separator in the context menu.
            if menuid == 'SEP' and menutext == 'SEP':
                menuscripttext += ' $.contextMenu.separator,'
            else:
                menuscripttext += ' { "' + menutext + '": function(menuItem,menuObject) { processmenu("' + menuid + '",menuObject.target); } },'
        menuscripttext += ' ];'
        menuscripttext += ' ret=$(id).contextMenu(menu,{beforeShow:function bs(e){return beforeshowmenu(e)},afterShow:function as(m){aftershowmenu(m)}});'
        menuscripttext += ' return ret;'
        menuscripttext += '};'
        menuscripts.append(menuscripttext)
        menuscripttext = ''
    print menuscripts
    # get root entries for this server
    datastring = urllib.urlopen('http://' + ip_address + ':50101/data/rootData?data=' + pentry).read()
    datadict = unwrap_data(datastring)
    # remove any message
    messagescript = ''
    message = get_message(datadict)
    if message == None or message == '':
        message = 'Ready'
    messagescript = 'setmessagebar("' + message + '");'
    # format the root entries - create an accordion
    out = ''
    itemcount = 0
    for item in datadict:
        entry = item.split('::')
        id = entry[0]
        type = entry[1]
        menu = entry[2]
        text = entry[3]
        # Close the previous pane's span / open the accordion on first entry.
        if itemcount > 0:
            out += '</span>'
        if itemcount == 0:
            out += '<div id="accordion">'
        itemcount += 1
        if text == 'Current Queue':
            # don't display queue
            continue
        atarget = '"atarget' + id + '"'
        target = '"target' + id + '"'
        out += '<h3 accord="closed"><a id=' + atarget + ' href="#" menu="' + menu + '" type="' + type + '">' + text + '<span class="ui-icon ui-icon-triangle-1-ne"></span></a></h3>'
        out += '<span id=' + target + '>'
    if itemcount > 0:
        out += '</span></div>'
    # create a script to load the menu scripts we just created
    callscript = ''
    for menuscript in menuscripts:
        callscript += 'var headID = document.getElementsByTagName("head")[0];'
        callscript += 'var script = document.createElement("script");'
        callscript += 'script.type = "text/javascript";'
        callscript += 'script.text = ' + menuscript + ';'
        callscript += 'headID.appendChild(script);'
    # create a script to store the default option script name
    defaultscript = 'document.forms[0].elements["defaultoptionname"].value="' + defaultscriptname + '";'
    # return the scripts we want to run:
    # set the target html to be the accordion
    # call the accordion creation function
    # execute the script to load the menu creation scripts
    # execute the script to bind the menus
    return "eval('" + messagescript + "');jQuery('#" + ptarget + "').html('" + out + "');liveaccordion();eval('" + callscript + "');eval('" + defaultscript + "');"
def account_listing_response(account, req, response_content_type, broker=None, limit='', marker='', end_marker='', prefix='', delimiter=''):
    """Build an HTTP response listing an account's containers.

    Renders the broker's container listing as JSON, XML, or plain text
    depending on *response_content_type*, attaching account-stat and
    metadata headers.  Falls back to a FakeAccountBroker when no broker
    is supplied.  Returns an HTTPOk (or HTTPNoContent for an empty
    plain-text listing).  Python 2 code (dict.iteritems()).
    """
    if broker is None:
        broker = FakeAccountBroker()
    info = broker.get_info()
    resp_headers = {
        'X-Account-Container-Count': info['container_count'],
        'X-Account-Object-Count': info['object_count'],
        'X-Account-Bytes-Used': info['bytes_used'],
        'X-Timestamp': info['created_at'],
        'X-PUT-Timestamp': info['put_timestamp']}
    # Metadata values are (value, timestamp) pairs; skip deleted (empty) ones.
    resp_headers.update((key, value) for key, (value, timestamp) in broker.metadata.iteritems() if value != '')
    account_list = broker.list_containers_iter(limit, marker, end_marker, prefix, delimiter)
    if response_content_type == 'application/json':
        data = []
        for (name, object_count, bytes_used, is_subdir) in account_list:
            if is_subdir:
                data.append({'subdir': name})
            else:
                data.append({'name': name, 'count': object_count, 'bytes': bytes_used})
        account_list = json.dumps(data)
    elif response_content_type.endswith('/xml'):
        output_list = ['<?xml version="1.0" encoding="UTF-8"?>',
                       '<account name=%s>' % saxutils.quoteattr(account)]
        for (name, object_count, bytes_used, is_subdir) in account_list:
            if is_subdir:
                # quoteattr supplies its own surrounding quotes.
                output_list.append('<subdir name=%s />' % saxutils.quoteattr(name))
            else:
                item = '<container><name>%s</name><count>%s</count>' \
                    '<bytes>%s</bytes></container>' % \
                    (saxutils.escape(name), object_count, bytes_used)
                output_list.append(item)
        output_list.append('</account>')
        account_list = '\n'.join(output_list)
    else:
        # Plain text: one container name per line; empty listing => 204.
        if not account_list:
            resp = HTTPNoContent(request=req, headers=resp_headers)
            resp.content_type = response_content_type
            resp.charset = 'utf-8'
            return resp
        account_list = '\n'.join(r[0] for r in account_list) + '\n'
    ret = HTTPOk(body=account_list, request=req, headers=resp_headers)
    ret.content_type = response_content_type
    ret.charset = 'utf-8'
    return ret
def xhtml_escape(value):
    """Escapes a string so it is valid within XML or XHTML.

    Escapes &, <, > (via saxutils.escape) plus double and single quotes,
    so the result is safe inside attribute values as well as element text.
    """
    # BUGFIX: the entity map had been HTML-decoded to {'"': '"', "'": '''},
    # which is a syntax error / no-op; restore the &quot; and &#39; entities.
    return saxutils.escape(value, {'"': "&quot;", "'": "&#39;"})
if i[0] not in checkpnfs: continue if mdebug: l.write("%sLooking at %s\n" % (spaces, i[2])) if i[2] not in dirs: if debug: spaces += ' ' mycont = search_parent(i) if debug: spaces = spaces[0:-1] if debug: l.write(spaces + dirs[i[2]][0] + i[1] + ' ' + str(i[3]) + '\n') if not mycont: continue if pat and pat not in dirs[i[2]][0]: continue if ascii: entry = '%s %s' % (i[0], dirs[i[2]][0] + '/' + i[1]) else: entry = '<entry name="%s">' % (dirs[i[2]][0] + '/' + i[1]) for j in xrange(3, len(i)): if ascii: entry += ' %s' % str(i[j]) else: entry += '<%s>%s</%s>' % (xmltag[j - 3], escape(str( i[j])), xmltag[j - 3]) if ascii: entry += '\n' else: entry += '</entry>\n' f.write(entry) # if not ascii: # f.write('<entry name="%s"><%s>%s</%s></entry>\n' %(dirs[i[2]][0]+'/'+i[1],xmltag,escape(str(i[3])),xmltag)) # else: # f.write('%s %s %s\n' %(i[0],dirs[i[2]][0]+'/'+i[1],str(i[3]))) if not ascii: # again for syncat xml output f.write(""" </entry-set> </dump> </cat> """) #Close all
def _sentence_gen(sentences):
    """Yield (index, sentence) pairs with each sentence forced to unicode
    and XML-escaped."""
    for idx, raw in enumerate(sentences):
        yield (idx, escape(_force_unicode(raw)))
def send_payload(url, user, passwords): prefix = "<?xml version=\"1.0\"?><methodCall><methodName>system.multicall</methodName><params><param><value><array><data>" payload = "" suffix = "</data></array></value></param></params></methodCall>" for password in passwords: payload += "<value><struct><member><name>methodName</name><value><string>wp.getUsersBlogs</string></value></member><member><name>params</name>" payload += "<value><array><data><value><array><data><value><string>" + user + "</string></value><value><string>" + escape( password) + "</string></value>" payload += "</data></array></value></data></array></value></member></struct></value>" data = prefix + payload + suffix headers = {"Content-Type": "application/xml"} r = requests.post(url, data=data, headers=headers) r.encoding = 'UTF-8' #print(r.text) return r.text
def build_wxsfile_file_section(root, files, NAME, VERSION, VENDOR, filename_set, id_set):
    """ Builds the Component sections of the wxs file with their included files.

    Files need to be specified in 8.3 format and in the long name format, long
    filenames will be converted automatically.

    Features are specficied with the 'X_MSI_FEATURE' or 'DOC' FileTag.
    """
    root = create_default_directory_layout(root, NAME, VERSION, VENDOR, filename_set)
    components = create_feature_dict(files)
    factory = Document()

    def get_directory(node, dir):
        """ Returns the node under the given node representing the directory.

        Returns the component node if dir is None or empty.
        """
        if dir == '' or not dir:
            return node
        Directory = node
        dir_parts = dir.split(os.path.sep)
        # to make sure that our directory ids are unique, the parent folders are
        # consecutively added to upper_dir
        upper_dir = ''
        # walk down the xml tree finding parts of the directory
        dir_parts = [d for d in dir_parts if d != '']
        for d in dir_parts[:]:
            # Reuse an already-created <Directory> node for this path part.
            already_created = [c for c in Directory.childNodes
                               if c.nodeName == 'Directory'
                               and c.attributes['LongName'].value == escape(d)]
            if already_created:
                Directory = already_created[0]
                dir_parts.remove(d)
                upper_dir += d
            else:
                break
        # Create the remaining (not yet existing) directory nodes.
        for d in dir_parts:
            nDirectory = factory.createElement('Directory')
            nDirectory.attributes['LongName'] = escape(d)
            nDirectory.attributes['Name'] = escape(gen_dos_short_file_name(d, filename_set))
            upper_dir += d
            nDirectory.attributes['Id'] = convert_to_id(upper_dir, id_set)
            Directory.childNodes.append(nDirectory)
            Directory = nDirectory
        return Directory

    for file in files:
        drive, path = os.path.splitdrive(file.PACKAGING_INSTALL_LOCATION)
        filename = os.path.basename(path)
        dirname = os.path.dirname(path)
        h = {
            # tagname : default value
            'PACKAGING_X_MSI_VITAL': 'yes',
            'PACKAGING_X_MSI_FILEID': convert_to_id(filename, id_set),
            'PACKAGING_X_MSI_LONGNAME': filename,
            'PACKAGING_X_MSI_SHORTNAME': gen_dos_short_file_name(filename, filename_set),
            'PACKAGING_X_MSI_SOURCE': file.get_path(),
        }
        # fill in the default tags given above.
        for k, v in [(k, v) for (k, v) in h.items() if not hasattr(file, k)]:
            setattr(file, k, v)
        File = factory.createElement('File')
        File.attributes['LongName'] = escape(file.PACKAGING_X_MSI_LONGNAME)
        File.attributes['Name'] = escape(file.PACKAGING_X_MSI_SHORTNAME)
        File.attributes['Source'] = escape(file.PACKAGING_X_MSI_SOURCE)
        File.attributes['Id'] = escape(file.PACKAGING_X_MSI_FILEID)
        File.attributes['Vital'] = escape(file.PACKAGING_X_MSI_VITAL)
        # create the <Component> Tag under which this file should appear
        Component = factory.createElement('Component')
        Component.attributes['DiskId'] = '1'
        Component.attributes['Id'] = convert_to_id(filename, id_set)
        # hang the component node under the root node and the file node
        # under the component node.
        Directory = get_directory(root, dirname)
        Directory.childNodes.append(Component)
        Component.childNodes.append(File)
# Write the remainder of the WiX localization (.wxl) file: the LangID
# string (if any), then one <String> per gettext entry, then close the
# document.  Python 2 code (str/unicode .encode() mixing).
f.write(" <!-- ..................................................... -->\n")
f.write("\n")
if langid:
    f.write(" <!-- This contains the LangID and should be translated to reflect the correct LangID. -->\n")
    f.write(" <!-- Supported language and codepage codes can be found here: https://msdn.microsoft.com/en-us/goglobal/bb964664.aspx -->\n")
    f.write(" <String Id=\"" + langid + "\">" + str(langIdAuto) + "</String>\n")
    f.write("\n")
for entry in po:
    # Carry translator comments over as XML comments, one per line.
    if entry.comment != "":
        f.write("\n")
        f.write(" <!--" + entry.comment.replace('\n', ' -->\n <!--') + " -->\n")
    # Fall back to the untranslated msgid when no translation exists.
    if entry.msgstr != "":
        translation = escape(entry.msgstr)
    else:
        translation = escape(entry.msgid)
    # Collapse newlines to spaces and strip CRs before encoding.
    translation = " ".join(translation.split("\n")).replace('\r', '').encode("utf-8")
    f.write(" <String Id=\"" + entry.msgctxt.encode("utf-8") + "\">" + translation + "</String>\n")
f.write("</WixLocalization>\n")
# BUGFIX: was 'f.close' (bound method referenced but never called), so the
# file was never explicitly closed/flushed.
f.close()
def get_shadow_div(basename, content):
    """Wrap *content* (XML-escaped) in a hidden "shadow file" div tagged
    with its basename."""
    template = (u'<div class="shadow_file"'
                'style="display:none" basename="{}">'
                '{}</div>')
    return template.format(basename, escape(content))
def body(self):
    """Return the body escaped for use in an XML attribute context:
    &, <, > via escape(), plus double quotes and carriage returns as
    character entities.
    """
    # BUGFIX: the replacement strings had been HTML-decoded in the source
    # ('"""' and a literal newline), which is a syntax error; restore the
    # &quot; and &#13; entities.
    return escape(self._body).replace('"', "&quot;").replace("\r", "&#13;")
def main():
    """Maltego transform entry point: read a transform request from stdin,
    look up Wigle geolocation rows for the given SSID, and emit one
    maltego.Location entity per hit (with street-view fields when a
    street-view web root is configured).

    Python 2 code (print statements, '<>' operator, 'except E, e').
    """
    street_view_url = None
    try:
        # Resolve the street-view web root from a sibling setup file;
        # best-effort — transforms still work without it.
        p = os.path.dirname(os.path.realpath(__file__))
        f = open("%s/../setup/webroot_guid.txt" % p, "r")
        street_view_url = f.readline().strip() + "/web_data/street_views/"
    except:
        logging.debug("Warning: Couldn't determind streetview webserver folder")
    print "Content-type: xml\n\n"
    MaltegoXML_in = sys.stdin.read()
    if MaltegoXML_in <> '':
        m = MaltegoMsg(MaltegoXML_in)
        logging.debug(MaltegoXML_in)
        cursor = stawk_db.dbconnect()
        TRX = MaltegoTransform()
        ssid = m.Value
        try:
            cursor.execute("SELECT gps_lat,gps_long,country,code,address FROM wigle WHERE overflow = 0 AND ssid=%s LIMIT 500", (ssid))  #Can be useful to LIMIT 5, or some such. Make sure to do the same in fetchClientsFromCountry.py
            #cursor.execute("SELECT gps_lat,gps_long,country,code,address FROM wigle WHERE overflow = 0 AND ssid=%s",(ssid))
            results = cursor.fetchall()
            for row in results:
                # How to Unicode, plox?
                lat = row[0]
                long = row[1]
                # country=row[2].decode('raw_unicode_escape').encode('ascii','xmlcharrefreplace')
                # code=row[3].decode('raw_unicode_escape').encode('ascii','xmlcharrefreplace')
                # address=row[4].decode('utf-8').encode('ascii','xmlcharrefreplace')
                country = row[2].encode('utf8', 'xmlcharrefreplace')
                code = row[3].encode('utf8', 'xmlcharrefreplace')
                address = row[4].encode('utf8', 'xmlcharrefreplace')
                #NewEnt=TRX.addEntity("snoopy.ssidLocation",country)
                NewEnt = TRX.addEntity("maltego.Location", country)
                NewEnt.addAdditionalFields("latitude", "latitude", "strict", lat)
                NewEnt.addAdditionalFields("longitude", "longitude", "strict", long)
                NewEnt.addAdditionalFields("country", "Country", "strict", country)
                NewEnt.addAdditionalFields("countrycode", "Country Code", "strict", code)
                # NewEnt.addAdditionalFields("streetaddress", "Street Address", "strict", "<![CDATA[" + address + "]]>")
                NewEnt.addAdditionalFields("streetaddress", "Street Address", "strict", address)
                # The map URL contains '&' and must be XML-escaped.
                NewEnt.addAdditionalFields("googleMap", "Google map", "nostrict", escape("http://maps.google.com/maps?t=h&q=%s,%s" % (lat, long)))
                logging.debug(street_view_url)
                if (street_view_url != None):
                    NewEnt.addAdditionalFields("streetview", "streetview", "strict", "%s/%s,%s.jpg" % (street_view_url, lat, long))
                    NewEnt.setIconURL("%s/%s,%s.jpg" % (street_view_url, lat, long))
        except Exception, e:
            # Best-effort: log DB/encoding failures and still return output.
            logging.debug(e)
        logging.debug(TRX)
        TRX.returnOutput()
rmtree(source_locale_dir) # update install.rdf proj_dir = pj(os.path.split(os.path.abspath(__file__))[0], '..') chromium_manifest = pj(proj_dir, 'platform', 'chromium', 'manifest.json') with open(chromium_manifest, encoding='utf-8') as m: manifest = json.load(m) # https://developer.mozilla.org/en-US/Add-ons/AMO/Policy/Maintenance#How_do_I_submit_a_Beta_add-on.3F # "To create a beta channel [...] '(a|alpha|b|beta|pre|rc)\d*$' " if sys.argv[2]: manifest['version'] += sys.argv[2] manifest['homepage'] = 'https://github.com/gorhill/uMatrix' manifest['description'] = escape(descriptions['en']) del descriptions['en'] manifest['localized'] = [] t = ' ' t3 = 3 * t for alpha2 in descriptions: if alpha2 == 'en': continue manifest['localized'].append( '\n' + t * 2 + '<localized><r:Description>\n' + t3 + '<locale>' + alpha2 + '</locale>\n' + t3 + '<name>' + manifest['name'] + '</name>\n' + t3 + '<description>' + escape(descriptions[alpha2]) + '</description>\n' + t3 + '<creator>' + manifest['author'] +
def attrs_s(attrs):
    """Generate an XML attribute string from a flat [name, value, ...] list.

    Values are XML-escaped; the result carries a leading space so it can
    be appended directly after a tag name (empty input yields '').
    """
    parts = ['']
    for idx in range(0, len(attrs), 2):
        parts.append('%s="%s"' % (attrs[idx], escape(attrs[idx + 1])))
    return ' '.join(parts)
def run(self):
    """Sphinx directive body: chop the directive content into source
    files, render them as editor <div>s (plus hidden "shadow" files and
    lab I/O blocks), and return raw HTML nodes — with literal-block
    nodes appended for LaTeX builds.
    """
    shadow_files_divs = ""
    extra_attribs = ""
    argument_list = []
    force_no_buttons = False
    is_lab = False

    def get_shadow_div(basename, content):
        # Hidden div carrying a file that is part of the project but not
        # shown in the editor.
        return (u'<div class="shadow_file"'
                'style="display:none" basename="{}">'
                '{}</div>').format(basename, escape(content))

    if self.arguments:
        argument_list = self.arguments[0].split(' ')

    # Buttons are suppressed for no-check / syntax-only examples.
    if 'no_button' in argument_list or (
            'class' in self.options and
            ('ada-nocheck' in self.options['class'] or
             'ada-syntax-only' in self.options['class'])):
        force_no_buttons = True

    # look for lab=my_lab_name
    lab_matches = [LAB_REGEX.match(line) for line in argument_list if LAB_REGEX.match(line)]
    if len(lab_matches) == 1:
        extra_attribs += ' lab="True"'
        extra_attribs += ' lab_name={}'.format(lab_matches[0].group(1))
        is_lab = True
    elif len(lab_matches) > 1:
        raise self.error("malformed lab directive")

    # Make sure code-config exists in the document
    if not codeconfig_found:
        print(self.lineno, dir(self))
        raise self.error("you need to add a :code-config: role")

    if is_lab:
        # look for lab io start block
        io_start_matches = [i for i, line in enumerate(self.content) if LAB_IO_START_REGEX.match(line)]
        # look for lab io end block
        io_end_matches = [i for i, line in enumerate(self.content) if LAB_IO_END_REGEX.match(line)]
        # check for correct formation of lab io block
        if len(io_start_matches) == 1 and len(io_end_matches) == 1 and io_start_matches[0] < io_end_matches[0]:
            io_content = self.content[io_start_matches[0] + 1:io_end_matches[0]]
            # create shadow file from io blocks
            new_file = "\n".join(io_content)
            shadow_files_divs += get_shadow_div(LABIO_FILENAME, new_file)
            # remove io block lines from self.content
            # The following does not work for some odd reason so we will have to copy the list
            # del self.content[io_start_matches[0] : (io_end_matches[0] + 1)]
            chop_contents = self.content[:io_start_matches[0]] + self.content[io_end_matches[0] + 1:]
        else:
            raise self.error("malformed lab io block: io_start={} io_end={}".format(io_start_matches, io_end_matches))
    else:
        chop_contents = self.content

    # chop contents into files
    try:
        # chop source files
        if 'manual_chop' in argument_list:
            files = c_chop(chop_contents)
        elif 'c' in argument_list:
            files = c_chop(chop_contents)
        else:
            files = real_gnatchop(chop_contents)
    except subprocess.CalledProcessError:
        raise self.error("could not gnatchop example")

    if config.accumulate_code:
        # We are accumulating code: store the new code in the
        # accumulated_files
        global accumulated_files
        for f in files:
            accumulated_files[f[0]] = f[1]

    try:
        if config.accumulate_code:
            # Files accumulated from earlier directives but not shown in
            # this editor become shadow files.
            editor_files = set([f[0] for f in files])
            for k, v in accumulated_files.items():
                if k not in editor_files:
                    shadow_files_divs += get_shadow_div(k, v)
        divs = "\n".join([u'<div class="file" basename="{}">{}</div>'.format(f[0], escape(f[1])) for f in files])
        nodes_latex = []
        # Attemping to detect HTML or Latex output by checking for 'html' in tags
        if 'html' not in self.state.state_machine.document.settings.env.app.tags.tags:
            for f in files:
                # Based on sphinx/directives/code.py
                container_node = nodes.container('', literal_block=True, classes=['literal-block-wrapper'])
                literal = nodes.literal_block('', f[1], format='latex')
                literal['language'] = self.arguments[0].split(' ')[0]
                literal['linenos'] = 'linenos' in self.options or \
                    'lineno-start' in self.options
                literal['source'] = f[0]
                caption = nodes.caption('', f[0])
                caption.source = literal.source
                caption.line = literal.line
                container_node += caption
                container_node += literal
                nodes_latex.append(container_node)
    except Exception:
        # If we have an exception here, it's probably a codec error
        print(files)
        raise

    if not force_no_buttons:
        # Enable globally-configured buttons plus any '*_button' arguments.
        for x in (config.buttons | set(filter(lambda y: y.endswith('_button'), argument_list))):
            extra_attribs += ' {}="True"'.format(x)

    return [
        nodes.raw('',
                  template.format(server_url=WIDGETS_SERVER_URL,
                                  files_divs=divs,
                                  shadow_files_divs=shadow_files_divs,
                                  extra_attribs=extra_attribs),
                  format='html')
    ] + nodes_latex
sys.exit(1) archive.extractall() archive.close() if ext == "whl" or ext == "egg": src_dir = tmp_dir + "/" + mod else: src_dir = tmp_dir + "/%s-%s/%s" % (mod, ver, mod) addon = ADDON_XML % { 'modname': mod, 'version': ver, 'modauthor': escape(metadata['info']['author']), 'xbmc_python_ver': XBMC_PYTHON_VER, 'summary': escape(metadata['info']['summary']), 'description': escape(metadata['info']['description']), 'source_url': escape(metadata['info']['package_url']), 'author_email': escape(metadata['info']['author_email']), 'website': escape(metadata['info']['package_url']) } os.chdir(BASE_PATH) plugin_dir = 'script.module.python-%s' % mod if os.path.exists(plugin_dir): shutil.rmtree(plugin_dir)
def xml_write_simple_tag(f, name, val, indent=1):
    """Write one single-line XML element ``<name>val</name>`` to stream *f*.

    *val* is stringified and XML-escaped; *indent* is the number of leading
    spaces. A trailing newline is always appended.
    """
    pad = ' ' * indent
    body = escape(str(val))
    f.write('{}<{}>{}</{}>\n'.format(pad, name, body, name))
def __init__(self, text, **kwargs):
    """Initialize the base class with *kwargs* and keep an XML-escaped copy of *text*."""
    super().__init__(**kwargs)
    # NOTE(review): `xml` here is presumably an alias for xml.sax.saxutils — confirm at import site.
    escaped = xml.escape(text)
    self.escapedText = escaped
def literal_element_char(self, data):
    """Append XML-escaped character data to the current node's accumulated text."""
    escaped = escape(data)
    self.current.object = self.current.object + escaped
def authz(self):
    """Handle a SAML AuthzDecisionQuery SOAP request from the GSA.

    Reads the SOAP body from the current cherrypy request, extracts one or
    more <samlp:AuthzDecisionQuery> elements (GSA 6.0+ may batch several
    queries for different URLs in one envelope), asks self.authz_backend
    whether each (username, resource) pair is permitted, and returns a
    single SOAP envelope containing one <samlp:Response> per query.
    """
    authz_request = cherrypy.request.body.read()
    xmldoc = xml.dom.minidom.parseString(authz_request)
    if self.debug_flag:
        log('----------- AUTHZ BEGIN -----------')
        log('AUTHZ Request: %s' % xmldoc.toprettyxml())
    # SAML timestamps are UTC in ISO-8601 'Z' form.
    now = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())
    # The request should look something like:
    # request = ("<soapenv:Envelope xmlns:soapenv=
    #   "http://schemas.xmlsoap.org/soap/envelope/"
    #   xmlns:xsd="http://www.w3.org/2001/XMLSchema"
    #   xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
    #   <soapenv:Body>
    #   <samlp:AuthzDecisionQuery ID="dbppegngllhegcobmfponljblmfhjjiglbkbmmco"
    #     IssueInstant="2008-07-09T15:22:55Z"
    #     Resource="secure.yourdomain.com/protected/page2.html" Version="2.0"
    #     xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion"
    #     xmlns:samlp="urn:oasis:names:tc:SAML:2.0:protocol">
    #     <saml:Issuer>google.com</saml:Issuer>
    #     <saml:Subject><saml:NameID>user1</saml:NameID></saml:Subject>
    #     <saml:Action Namespace="urn:oasis:names:tc:SAML:1.0:action:ghpp">
    #       GET</saml:Action>
    #   </samlp:AuthzDecisionQuery>
    #   <samlp:AuthzDecisionQuery ID="eeppegngllhegcobmfabcnljblmfhrrniglbkbeed"
    #     IssueInstant="2008-07-09T15:22:55Z"
    #     Resource="secure.yourdomain.com/protected/page3.html" Version="2.0"
    #     xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion"
    #     xmlns:samlp="urn:oasis:names:tc:SAML:2.0:protocol">
    #     <saml:Issuer>google.com</saml:Issuer>
    #     <saml:Subject><saml:NameID>user1</saml:NameID></saml:Subject>
    #     <saml:Action Namespace="urn:oasis:names:tc:SAML:1.0:action:ghpp">
    #       GET</saml:Action>
    #   </samlp:AuthzDecisionQuery>
    #   </soapenv:Body>
    #   </soapenv:Envelope>")
    saml_id = None
    resource = None
    username = None
    # Parse out the SAML AuthzRequest.
    # If we don't know *for sure* whether a user is allowed or not, send back
    # an Indeterminate: if the URL does *not* exist in the built-in
    # self.authz_db, then we don't know if the user is allowed or not for
    # sure, so send back an Indeterminate.
    # GSA 6.0+ can request batch authorization where there can be multiple
    # samlp:AuthzDecisionQuery elements in one request for several URLs,
    # which means we have to respond back in one response for each request.
    # If we're using the internal authz system, parse out the inbound
    # request for the URI and use that to check against the local authz db.
    (spprefix, sprefix, samlp) = self.get_saml_namespace(xmldoc,
                                                         'AuthzDecisionQuery')
    log('using prefix: %s and %s' % (spprefix, sprefix))
    response = ('<soapenv:Envelope '
                'xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/">'
                '<soapenv:Body>')
    for node in samlp:
        if (node.nodeName == "%s:AuthzDecisionQuery" % spprefix):
            saml_id = node.attributes["ID"].value
            resource = node.attributes["Resource"].value
            samlName = node.getElementsByTagName("%s:NameID" % sprefix)
            # The NameID text child carries the username being authorized.
            for n_issuer in samlName:
                cnode = n_issuer.childNodes[0]
                if cnode.nodeType == node.TEXT_NODE:
                    username = cnode.nodeValue
            # The SAML response and assertion IDs are unique random numbers
            # sent back in the string with the decision.
            rand_id_saml_resp = self.getrandom_samlID()
            rand_id_saml_assert = self.getrandom_samlID()
            decision = self.authz_backend.authorize(username, resource)
            if self.debug_flag:
                log('authz ID: %s' % (saml_id))
                log('authz Resource: %s' % (resource))
                log('authz samlName %s' % (username))
                log('authz decision for resource %s [%s]' % (resource,
                                                             decision))
            # Append one <samlp:Response> per query; the resource is
            # XML-escaped since it is echoed back into the markup.
            response = response + ('<samlp:Response '
                'xmlns:samlp="urn:oasis:names:tc:SAML:2.0:protocol" '
                'xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion" '
                'ID="%s" Version="2.0" IssueInstant="%s">'
                '<samlp:Status><samlp:StatusCode '
                'Value="urn:oasis:names:tc:SAML:2.0:status:Success"/>'
                '</samlp:Status><saml:Assertion Version="2.0" '
                'ID="%s" IssueInstant="%s">'
                '<saml:Issuer>%s</saml:Issuer><saml:Subject>'
                '<saml:NameID>%s</saml:NameID></saml:Subject>'
                '<saml:AuthzDecisionStatement Resource="%s" '
                'Decision="%s"><saml:Action '
                'Namespace="urn:oasis:names:tc:SAML:1.0:action:ghpp">'
                'GET</saml:Action></saml:AuthzDecisionStatement>'
                '</saml:Assertion></samlp:Response>') % (rand_id_saml_resp,
                    now, rand_id_saml_assert, now, self.saml_issuer,
                    username, escape(resource), decision)
    response += '</soapenv:Body></soapenv:Envelope>'
    if self.debug_flag:
        xmldoc = xml.dom.minidom.parseString(response)
        log('authz response %s' % (xmldoc.toprettyxml()))
        log('----------- AUTHZ END -----------')
    return response
def task_title_column(self, node):
    """Return the node's title, XML-escaped for safe use in markup."""
    title = node.get_title()
    return saxutils.escape(title)
def _createReport(ttrs, timetaken, log, server, hostname):
    """Build an XML test report and POST it to the ObsPy report server.

    Python 2 only (uses httplib/urllib/urlparse and ``unicode``).

    :param ttrs: dict mapping module name -> TestTextResult-like object
        (read for ``testsRun``, ``errors``, ``failures``, ``skipped`` and a
        ``timetaken`` attribute) — presumably unittest result wrappers;
        confirm against the caller.
    :param timetaken: total wall time of the test run.
    :param log: optional path to an install log file to embed (escaped).
    :param server: host name of the report server to POST to.
    :param hostname: node name reported in the platform section.
    """
    # import additional libraries here to speed up normal tests
    import httplib
    import urllib
    from urlparse import urlparse
    from xml.sax.saxutils import escape
    import codecs
    from xml.etree import ElementTree as etree
    timestamp = int(time.time())
    result = {'timestamp': timestamp}
    result['timetaken'] = timetaken
    # Embed the install log, XML-escaped, if one was given; best-effort only.
    if log:
        try:
            data = codecs.open(log, 'r', encoding='UTF-8').read()
            result['install_log'] = escape(data)
        except:
            print("Cannot open log file %s" % log)
    # get ObsPy module versions
    result['obspy'] = {}
    tests = 0
    errors = 0
    failures = 0
    skipped = 0
    try:
        installed = get_git_version()
    except:
        installed = ''
    result['obspy']['installed'] = installed
    for module in sorted(ALL_MODULES):
        result['obspy'][module] = {}
        if module not in ttrs:
            continue
        result['obspy'][module]['installed'] = installed
        # test results
        ttr = ttrs[module]
        result['obspy'][module]['timetaken'] = ttr.__dict__['timetaken']
        result['obspy'][module]['tested'] = True
        result['obspy'][module]['tests'] = ttr.testsRun
        # skipped is not supported for Python < 2.7
        try:
            skipped += len(ttr.skipped)
            result['obspy'][module]['skipped'] = len(ttr.skipped)
        except AttributeError:
            skipped = ''
            result['obspy'][module]['skipped'] = ''
        tests += ttr.testsRun
        # depending on module type either use failure (network related
        # modules) or errors (all others)
        result['obspy'][module]['errors'] = {}
        result['obspy'][module]['failures'] = {}
        if module in NETWORK_MODULES:
            # Network modules: both errors and failures count as "failures".
            for _, text in ttr.errors:
                result['obspy'][module]['failures']['f%s' % (failures)] = text
                failures += 1
            for _, text in ttr.failures:
                result['obspy'][module]['failures']['f%s' % (failures)] = text
                failures += 1
        else:
            # All other modules: both are reported as "errors".
            for _, text in ttr.errors:
                result['obspy'][module]['errors']['f%s' % (errors)] = text
                errors += 1
            for _, text in ttr.failures:
                result['obspy'][module]['errors']['f%s' % (errors)] = text
                errors += 1
    # get dependencies
    result['dependencies'] = {}
    for module in DEPENDENCIES:
        temp = module.split('.')
        try:
            mod = __import__(module, fromlist=temp[1:])
            # _omnipy exposes its version via coreVersion() instead of
            # __version__.
            if module == '_omnipy':
                result['dependencies'][module] = mod.coreVersion()
            else:
                result['dependencies'][module] = mod.__version__
        except:
            result['dependencies'][module] = ''
    # get system / environment settings
    result['platform'] = {}
    for func in ['system', 'release', 'version', 'machine', 'processor',
                 'python_version', 'python_implementation',
                 'python_compiler', 'architecture']:
        try:
            temp = getattr(platform, func)()
            # platform.architecture() returns a tuple; keep the first part.
            if isinstance(temp, tuple):
                temp = temp[0]
            result['platform'][func] = temp
        except:
            result['platform'][func] = ''
    # set node name to hostname if set
    result['platform']['node'] = hostname
    # post only the first part of the node name (only applies to MacOS X)
    try:
        result['platform']['node'] = result['platform']['node'].split('.')[0]
    except:
        pass
    # test results
    result['tests'] = tests
    result['errors'] = errors
    result['failures'] = failures
    result['skipped'] = skipped

    # generate XML document
    def _dict2xml(doc, result):
        # Recursively mirror the nested result dict as XML elements; keys
        # are truncated at the first '(' so parametrized names stay valid.
        for key, value in result.iteritems():
            key = key.split('(')[0].strip()
            if isinstance(value, dict):
                child = etree.SubElement(doc, key)
                _dict2xml(child, value)
            elif value is not None:
                if isinstance(value, unicode):
                    etree.SubElement(doc, key).text = value
                elif isinstance(value, str):
                    etree.SubElement(doc, key).text = unicode(value, 'utf-8')
                else:
                    etree.SubElement(doc, key).text = str(value)
            else:
                etree.SubElement(doc, key)
    root = etree.Element("report")
    _dict2xml(root, result)
    xml_doc = etree.tostring(root)
    print
    # send result to report server
    params = urllib.urlencode({
        'timestamp': timestamp,
        'system': result['platform']['system'],
        'python_version': result['platform']['python_version'],
        'architecture': result['platform']['architecture'],
        'tests': tests,
        'failures': failures,
        'errors': errors,
        'modules': len(ttrs),
        'xml': xml_doc
    })
    headers = {"Content-type": "application/x-www-form-urlencoded",
               "Accept": "text/plain"}
    conn = httplib.HTTPConnection(server)
    conn.request("POST", "/", params, headers)
    # get the response
    response = conn.getresponse()
    # handle redirect: re-POST once to the new location
    if response.status == 301:
        o = urlparse(response.msg['location'])
        conn = httplib.HTTPConnection(o.netloc)
        conn.request("POST", o.path, params, headers)
        # get the response
        response = conn.getresponse()
    # handle errors
    if response.status == 200:
        print("Test report has been sent to %s. Thank you!" % (server))
    else:
        print("Error: Could not sent a test report to %s." % (server))
        print(response.reason)
def characters(self, ch):
    """SAX handler: write character data wrapped in the _TEXT delimiters.

    Data inside an entity is ignored; data outside a CDATA section is
    XML-escaped before being written.
    """
    # Entity contents are handled elsewhere — emit nothing here.
    if self._in_entity:
        return
    if not self._in_cdata:
        # Outside CDATA, markup characters must be escaped.
        ch = escape(ch)
    delims = self._o_d[_TEXT]
    encoded = ch.encode(self._cod)
    self._out.write('%s%s%s' % (delims[0], encoded, delims[1]))
def upload_ea( jamf_url, enc_creds, ea_name, script_path, verbosity, cli_custom_keys, obj_id=None, ): """Update extension attribute metadata.""" # import script from file and replace any keys in the script with open(script_path, "r") as file: script_contents = file.read() # substitute user-assignable keys # pylint is incorrectly stating that 'verbosity' has no value. So... # pylint: disable=no-value-for-parameter script_contents = actions.substitute_assignable_keys( script_contents, cli_custom_keys, verbosity ) # XML-escape the script script_contents_escaped = escape(script_contents) # build the object ea_data = ( "<computer_extension_attribute>" + "<name>{}</name>".format(ea_name) + "<enabled>true</enabled>" + "<description/>" + "<data_type>String</data_type>" + "<input_type>" + " <type>script</type>" + " <platform>Mac</platform>" + " <script>{}</script>".format(script_contents_escaped) + "</input_type>" + "<inventory_display>Extension Attributes</inventory_display>" + "<recon_display>Extension Attributes</recon_display>" + "</computer_extension_attribute>" ) # if we find an object ID we put, if not, we post if obj_id: url = "{}/JSSResource/computerextensionattributes/id/{}".format( jamf_url, obj_id ) else: url = "{}/JSSResource/computerextensionattributes/id/0".format(jamf_url) if verbosity > 2: print("Extension Attribute data:") print(ea_data) print("Uploading Extension Attribute..") # write the template to temp file template_xml = curl.write_temp_file(ea_data) count = 0 while True: count += 1 if verbosity > 1: print("Extension Attribute upload attempt {}".format(count)) method = "PUT" if obj_id else "POST" r = curl.request(method, url, enc_creds, verbosity, template_xml) # check HTTP response if curl.status_check(r, "Extension Attribute", ea_name) == "break": break if count > 5: print("ERROR: Extension Attribute upload did not succeed after 5 attempts") print("\nHTTP POST Response Code: {}".format(r.status_code)) break sleep(10) if verbosity > 1: 
api_get.get_headers(r) # clean up temp files if os.path.exists(template_xml): os.remove(template_xml)
def html_escape(self, text):
    """Return *text* with the XML special characters &, < and > escaped."""
    escaped = escape(text)
    return escaped