def add_top_part(nr_results, offset, search): html = """<div class="container"><div class="row">""" if nr_results == 0: html += """<div class="col-sm-8""> <span class="border-success"> <h3>No Pastes found for "%s"</h3> </span> </div>""" % html_escape(search) else: html += """<div class="col-sm-8""> <span class="border-success"> <h3>%s Pastes found for "%s"</h3> </span> </div> """ % (nr_results, html_escape(search)) html += """<div class="col-sm-4"> <form class="form-search" action="/cgi-bin/rule_classification.py" > <div class="input-group"> <input type="text" placeholder="Search.." name="search" class="form-control form-control"> <input type="hidden" name="search_offset" value=0> <span class="input-group-btn-lg"> <button type="submit" class="btn btn-outline-success">Submit </button> </span> </div> </form> </div>""" html += """</div></div>""" print html
def export(db): """ 输出书签下载 """ #--导出数据-- cats = {} db.execute('SELECT ID,TITLE FROM CATEGORY') for cid, title in db.getall(): cats[int(cid)] = html_escape(title) db.execute('SELECT CATEGORY_ID,URL,TITLE,1304560792 FROM URLS') data = {} # dt = {} for cid, url, title, adddate in db.getall(): cid = int(cid) if cid in data: data[cid].append( (html_escape(url), html_escape(title), int(adddate))) else: data[cid] = [(html_escape(url), html_escape(title), int(adddate))] if cid in dt: dt[cid] = min(dt[cid], int(adddate)) else: dt[cid] = int(adddate) #开始合并最终结果 #(categoryname,adddate,[(url,title,adddate)]) content = [] for cid in cats: if cid in data: content.append((cats[cid], dt[cid], data[cid])) htmlcode = exportBookmark(content) response['Content-Type'] = 'text/html' response['Content-Disposition'] = 'attachment; filename="bookmarks.html"' return compressit(htmlcode)
def export(db): """ 输出书签下载 """ #--导出数据-- cats = {} db.execute('SELECT ID,TITLE FROM CATEGORY') for cid,title in db.getall(): cats[int(cid)]=html_escape(title) db.execute('SELECT CATEGORY_ID,URL,TITLE,1304560792 FROM URLS') data = {} # dt = {} for cid,url,title,adddate in db.getall(): cid = int(cid) if cid in data: data[cid].append((html_escape(url),html_escape(title),int(adddate))) else: data[cid]= [(html_escape(url),html_escape(title),int(adddate))] if cid in dt: dt[cid] = min(dt[cid],int(adddate)) else: dt[cid] = int(adddate) #开始合并最终结果 #(categoryname,adddate,[(url,title,adddate)]) content = [] for cid in cats: if cid in data: content.append( (cats[cid],dt[cid],data[cid]) ) htmlcode = exportBookmark(content) response['Content-Type'] = 'text/html' response['Content-Disposition'] = 'attachment; filename="bookmarks.html"' return htmlcode
def do_search(search, offset, nr_results=None):
    """Query pastes whose category equals `search` and print one result
    page as an HTML table (CGI output, Python 2 `print` statement).

    Relies on module-level `cursor`, `SC_PER_PAGE`, `html_escape`,
    `highlight`, `add_top_part` and `add_bottom_part`.
    """
    # look through database return limit number of pastes, print them
    if search is None:
        search = ""
    bool_search = search
    if nr_results is None:
        # Count total matches once so pagination can be rendered.
        cursor.execute(
            """SELECT COUNT(*) FROM identifier WHERE category=%(search)s""",
            {'search': bool_search})
        nr_results = cursor.fetchall()[0][0]
        # Debug output: echoes the executed statement into the page.
        print html_escape(cursor.statement)
    sql_offset = offset * SC_PER_PAGE
    cursor.execute(
        """SELECT * FROM identifier INNER JOIN scrape
        ON identifier.scrape_id = scrape.id
        WHERE category=%(search)s ORDER by scrape_id DESC
        LIMIT %(sql_offset)s, %(SC_PER_PAGE)s""", {
            'search': bool_search,
            'sql_offset': sql_offset,
            'SC_PER_PAGE': SC_PER_PAGE
        })
    print html_escape(cursor.statement)
    result = cursor.fetchall()
    add_top_part(nr_results, offset, search)
    if nr_results == 0:
        return
    html = """<div class="container">"""
    html += """<table cellpadding="0" cellspacing="0" border="0" class="datatable table table-striped table-bordered" id="results"> <thead> <tr> <th>Title</th> <th onclick="sortTableNumber(1)">Creation Date</th> <th> Raw Preview</th> </tr></thead>"""
    # Row layout comes from identifier JOIN scrape; id2 is scrape.id.
    for (id1, scrape_id, category, nr_of_results, id2, scrape_url, full_url,
         date, paste_key, size, expire, title, syntax, raw) in result:
        if title == "":
            title = "Untitled"
        html += "<tr>\n"
        html += """<td> <a href="/cgi-bin/paste_inspection.py?id=%s" target="_blank">%s</a> </td>\n""" % (
            id2, title.encode('utf-8'))
        # data-ts carries the raw unix time for client-side sorting.
        html += "<td data-ts=%s> %s </td>\n" % (
            int(date), datetime.datetime.fromtimestamp(
                int(date)).strftime('%d-%m-%Y %H:%M:%S'))
        raw1, raw2 = highlight(html_escape(raw.encode('utf-8')), "",
                               identifier=search)
        if raw2 is not None:
            html += """<td> <p>%s</p> <p>%s</p> </td>\n""" % (raw1, raw2)
        else:
            html += """<td> <p>%s</p></td>\n""" % raw1
        html += "</tr>\n"
    html += "</table>\n"
    html += "</div>"
    print html
    add_bottom_part(search, offset, nr_results)
    # NOTE(review): the footer string appears truncated in this chunk;
    # closing quotes added here so the block stays well-formed.
    print """<footer class="footer"> <div class="jumbotron text-center" style="margin-bottom:0">"""
def add_top_part(nr_results, offset, search): html = """<div class="container"><div class="row">""" if nr_results == 0: html += """<div class="col-sm-8""> <span class="border-success"> <h3>No Pastes found for "%s"</h3> </span> </div>""" % html_escape(search) else: html += """<div class="col-sm-8""> <span class="border-success"> <h3>%s Pastes found for "%s"</h3> </span> </div> """ % (nr_results, html_escape(search)) print html
def njs(db): """ 新的JS模版. 未登陆的返回一个错误的Token好了。 这样用户选择的时候会出现需要登陆的提示。 模版: {{tags}},{{categorys}} 输入: url,title """ existed = False lasttime = '' tips = '' url = request.GET.get('url','') #可以用来进行判断预分配的,暂时还没用上 if url: cleanurl = normaliseurl(url) shortid = getshorturlid(url) db.execute('SELECT URL,TIE_TIME FROM URLS WHERE SHORT_URL_ID=%s',(shortid)) for url,tietime in db.getall(): #怕不同URL得到相同的SHORT_URL_ID u = normaliseurl(url) if u == cleanurl: existed = True lasttime = tietime break cats = [] for cid,title in getCategory(db): cats.append('<option value="%s">%s</option>' % (cid,html_escape(title))) #@todo: 尝试去获取最可能的Categorys和Tags. ret = template('jsp',tags="",categorys=''.join(cats),httpdomain=CFG.domain,existed=existed,lasttime=lasttime,tips=tips) response['Content-Type'] = 'application/javascript' return ret
def esca(v):
    """Return an HTML-escaped, double-quoted string representation of v.

    For str inputs, newlines are additionally rendered as literal "\\n"
    so the value stays on a single display line.
    """
    val = str(v)
    val = utils.html_escape(val)
    if type(v) == str:
        # Bug fix: operate on the already-escaped text. The previous code
        # used the raw input (`v.replace(...)`), silently discarding the
        # HTML escaping for every str value.
        val = val.replace("\n", "\\n")
    val = "\"" + val + "\""
    return val
def __render_tag(info, state):
    """
    Render an individual tag by making the appropriate replacement
    within the current context (if any).
    """
    new_contexts, context_match = get_tag_context(info['tag_key'], state)
    replacement = ''
    # The `== 0` check keeps a literal 0 value from being treated as
    # "no match" by the truthiness test.
    if context_match or context_match == 0:
        replacement = context_match
    elif info['tag_key'] == '.':
        # "." refers to the current context object itself.
        replacement = state.context()
    else:
        replacement = ''
    # Call all callables / methods / lambdas / functions
    if replacement and callable(replacement):
        replacement = make_unicode(replacement())
        # A callable's output may itself contain template tags, so it is
        # re-rendered against the default tag set.
        state.push_tags(state.default_tags)
        replacement = __render(template=replacement, state=state)
        state.pop_tags()
    for i in xrange(new_contexts):
        # Unwind every context frame pushed by get_tag_context.
        state.context.pop()
    if state.escape():
        return html_escape(replacement)
    return replacement
async def setprofile(msg: Message) -> None:
    """Set the sender's profile sentence.

    Descriptions from unauthorized users are stored as pending and staff
    is notified so the text can be approved or rejected.
    Replies are in Italian (user-facing strings left untouched).
    """
    if not msg.arg:
        await msg.reply("Specifica una frase da inserire nel tuo profilo")
        return
    if len(msg.arg) > 250:
        await msg.reply("Errore: lunghezza massima 250 caratteri")
        return
    # authorized: True if msg.user can approve new descriptions.
    authorized = msg.user.has_role("driver", msg.conn.main_room)
    db = Database.open()
    with db.get_session() as session:
        userid = msg.user.userid
        # Ensure a row exists for this user before updating it.
        session.add(d.Users(userid=userid))
        query_ = session.query(d.Users).filter_by(userid=userid)
        if authorized:
            # Authorized users skip the validation process.
            query_.update({"description": msg.arg, "description_pending": ""})
        else:
            query_.update({"description_pending": msg.arg})
    await msg.reply("Salvato")
    if not authorized:
        # Ping staff (rank %) so the pending description gets reviewed.
        username = utils.html_escape(msg.user.username)
        botname = msg.conn.username
        cmd = f"{msg.conn.command_character}pendingdescriptions"
        message = (
            f"{username} ha aggiornato la sua frase del profilo.<br>"
            f'Usa <button name="send" value="/pm {botname}, {cmd}">{cmd}</button> '
            "per approvarla o rifiutarla"
        )
        await msg.conn.main_room.send_rankhtmlbox("%", message)
def to_html(msgid, value, html_tag='span'):
    """Wrap a translatable value in an HTML tag carrying its msgid.

    Entries without a translated PO entry are flagged with the
    "not_translated" CSS class. `value` is embedded as-is.
    """
    po_entry, _po_lib = get_po_entry(msgid)
    if po_entry and po_entry.translated():
        marker = ''
    else:
        marker = 'not_translated'
    return u"<{html_tag} data-msgid='{msgid}' class='_itr {not_translated}'>{value}</{html_tag}>".format(
        msgid=html_escape(msgid),
        value=value,
        not_translated=marker,
        html_tag=html_tag,
    )
def test_widget_with_default_settings(self):
    """
    Test the widget with default settings which is defined in django
    settings file.

    NOTE(review): this test performs live HTTP requests to the Google
    Static Maps API via urllib and asserts a 200 response — it requires
    network access and a valid GOOGLE_MAP_API_KEY.
    """
    zoom = 18
    map_size = "400x400"
    thumbnail_size = "100x100"
    widget_settings = {
        "GoogleStaticOverlayMapWidget": (
            ("zoom", zoom),
            ("size", map_size),
            ("thumbnail_size", thumbnail_size),
        ),
        "GOOGLE_MAP_API_KEY": GOOGLE_MAP_API_KEY,
    }
    with override_settings(MAP_WIDGETS=widget_settings):
        # Module reload is required so the widget picks up the
        # overridden settings at import time.
        reload_module(mw_widgets)
        widget = mw_widgets.GoogleStaticOverlayMapWidget()
        settings = widget.map_settings

        # test `map_settings` method
        self.assertEqual(settings.get("zoom"), zoom)
        self.assertEqual(settings.get("size"), map_size)
        self.assertEqual(settings.get("thumbnail_size"), thumbnail_size)

        # test render
        point = Point(-92.9903, 34.7392)
        widget_html_elem_id = "id_location"
        widget_html_elem_name = "location"
        result = widget.render(name=widget_html_elem_name, value=point,
                               attrs={'id': widget_html_elem_id})
        map_image_url = widget.get_image_url(point)
        self.assertIn(GOOGLE_MAP_API_KEY, map_image_url)
        # Rendered HTML embeds the escaped image URL.
        self.assertIn(html_escape(map_image_url), result)

        # test map_image_url (live request)
        res = urllib.urlopen(map_image_url)
        self.assertEqual(res.getcode(), 200)
        self.assertEqual(res.info().type, "image/png")

        # test thumbnail_image_url (live request)
        thumbnail_url = widget.get_thumbnail_url(point)
        res = urllib.urlopen(thumbnail_url)
        self.assertEqual(res.getcode(), 200)
        self.assertEqual(res.info().type, "image/png")

        # test map_image_url with `None` value
        result = widget.render(name=widget_html_elem_name, value=None,
                               attrs={'id': widget_html_elem_id})
        thumbnail_url = widget.get_thumbnail_url(None)
        self.assertIn(thumbnail_url, result)
def print_snippet(snippet_lines, current_line_nb):
    # Write an HTML-escaped code snippet between <hr> rules, rendering
    # the current line in bold red. (Closure: writes to self.out.)
    self.out.write("<hr><font color=black>")
    for idx, raw_line in enumerate(snippet_lines):
        escaped = utils.html_escape(raw_line)
        if idx == current_line_nb:
            escaped = "<font color=red><b>" + escaped + "</b></font>"
        self.out.write(escaped)
    self.out.write("</font><hr>")
def add_var_to_tree(tree, key, val):
    # Add a "name:value" leaf to the tree, skipping classes, callables,
    # modules and private (underscore-prefixed) names.
    if inspect.isclass(val) or inspect.isroutine(val) or inspect.ismodule(val):
        return
    if key.startswith("_"):
        return
    tree.add_file_text(utils.html_escape(key) + ":" + esca(val))
def printVar(key, val):
    # Write one "key: value" line, skipping classes, callables, modules
    # and private (underscore-prefixed) names. (Closure: writes to self.out.)
    if inspect.isclass(val) or inspect.isroutine(val) or inspect.ismodule(val):
        return
    if key.startswith("_"):
        return
    name = utils.html_escape(key)
    value = '<a class="val">' + esca(val) + "</a>"
    self.out.write(" " + name + ": " + value + "\n")
def test_widget_with_custom_settings(self):
    """
    Test the widget with custom settings which is updated by `settings`
    parameter.

    NOTE(review): performs live HTTP requests to the Google Static Maps
    API; `thumbnail_size` is passed to the widget but never asserted.
    """
    zoom = 18
    map_size = "300x300"
    thumbnail_size = "75x75"
    widget_settings = {
        "GOOGLE_MAP_API_KEY": GOOGLE_MAP_API_KEY,
    }
    with override_settings(MAP_WIDGETS=widget_settings):
        # Module reload so the widget picks up the overridden settings.
        reload_module(mw_widgets)
        widget = mw_widgets.GoogleStaticOverlayMapWidget(
            zoom=zoom, size=map_size, thumbnail_size=thumbnail_size)
        settings = widget.map_settings

        # test `map_settings` method
        self.assertEqual(settings.get("zoom"), zoom)
        self.assertEqual(settings.get("size"), map_size)

        # test render
        point = Point(-105.9903, 38.7392)
        widget_html_elem_id = "id_location"
        widget_html_elem_name = "location"
        result = widget.render(name=widget_html_elem_name, value=point,
                               attrs={'id': widget_html_elem_id})
        map_image_url = widget.get_image_url(point)
        self.assertIn(GOOGLE_MAP_API_KEY, map_image_url)
        self.assertIn(html_escape(map_image_url), result)

        # test map_image_url (live request)
        res = urllib.urlopen(map_image_url)
        self.assertEqual(res.getcode(), 200)
        self.assertEqual(res.info().type, "image/png")

        # test thumbnail_image_url (live request)
        thumbnail_url = widget.get_thumbnail_url(point)
        res = urllib.urlopen(thumbnail_url)
        self.assertEqual(res.getcode(), 200)
        self.assertEqual(res.info().type, "image/png")
async def colorcompare(conn: Connection, room: Optional[str], user: str, arg: str) -> None:
    """Show side-by-side username-color swatches for a comma-separated
    list of names, as a fixed-layout HTML table."""
    if arg == "":
        return
    cell_tpl = (
        "<td>"
        ' <div style="background:{color};text-align:center"><br><br>{username}<br><br><br></div>'
        "</td>"
    )
    cells = "".join(
        cell_tpl.format(
            color=utils.username_color(utils.to_user_id(name)),
            username=utils.html_escape(name),
        )
        for name in arg.split(",")
    )
    html = '<table style="width:100%;table-layout:fixed">'
    html += "<tr>" + cells + "</tr>"
    html += "</table>"
    await conn.send_htmlbox(room, user, html)
def test_widget_with_custom_settings(self):
    """
    Test the widget with custom settings which is updated by `settings`
    parameter.

    NOTE(review): duplicate of the earlier custom-settings test; performs
    live HTTP requests to the Google Static Maps API and `thumbnail_size`
    is passed but never asserted.
    """
    zoom = 18
    map_size = "300x300"
    thumbnail_size = "75x75"
    widget_settings = {
        "GOOGLE_MAP_API_KEY": GOOGLE_MAP_API_KEY,
    }
    with override_settings(MAP_WIDGETS=widget_settings):
        # Module reload so the widget picks up the overridden settings.
        reload_module(mw_widgets)
        widget = mw_widgets.GoogleStaticOverlayMapWidget(zoom=zoom,
                                                         size=map_size,
                                                         thumbnail_size=thumbnail_size)
        settings = widget.map_settings

        # test `map_settings` method
        self.assertEqual(settings.get("zoom"), zoom)
        self.assertEqual(settings.get("size"), map_size)

        # test render
        point = Point(-105.9903, 38.7392)
        widget_html_elem_id = "id_location"
        widget_html_elem_name = "location"
        result = widget.render(name=widget_html_elem_name, value=point,
                               attrs={'id': widget_html_elem_id})
        map_image_url = widget.get_image_url(point)
        self.assertIn(GOOGLE_MAP_API_KEY, map_image_url)
        self.assertIn(html_escape(map_image_url), result)

        # test map_image_url (live request)
        res = urllib.urlopen(map_image_url)
        self.assertEqual(res.getcode(), 200)
        self.assertEqual(res.info().type, "image/png")

        # test thumbnail_image_url (live request)
        thumbnail_url = widget.get_thumbnail_url(point)
        res = urllib.urlopen(thumbnail_url)
        self.assertEqual(res.getcode(), 200)
        self.assertEqual(res.info().type, "image/png")
def getUrls(db):
    # Return the list of all valid URLs.
    # Each returned tuple contains:
    # 1. urlid
    # 2. categoryid
    # 3. url
    # 4. title
    # 5. list of tags
    # 6. notes
    # 7. posttime (string, "2013-01-01" format)
    # 8. archive status (int): -1 user has no permission, 0 not submitted,
    #    1 submitted / archiving, 2 queued for submission, 3 archived
    # 9. isheart (int)
    data = []
    iscan = needarchive(db) and archiveapiok(db)
    db.execute(
        'SELECT ID,CATEGORY_ID,URL,TITLE,TAG,NOTES,TIE_TIME,IS_HEART,IS_ARCHIVED,ARCHIVED_OK FROM URLS ORDER BY ID DESC'
    )
    for urlid, cid, url, title, tag, notes, pt, isheart, isarchive, archivedok in db.getall(
    ):
        tag = refineTag(tag).lower()
        # Drop empty entries produced by stray commas.
        tags = [x for x in tag.split(',') if x]
        # Escape notes, then normalize CR/LF line breaks.
        notes = "\n ".join(
            filter(None, re.split("\n|\r", html_escape(notes))))
        if iscan:
            thisarchive = 0  # archiving enabled for this user
        else:
            thisarchive = -1  # user has not enabled Archive
        if int(archivedok) == 1:
            # Submitted and completed: archive results can be viewed.
            thisarchive = 3
        elif int(isarchive) in [1, 2]:
            # Submitted but not finished / queued for submission.
            thisarchive = int(isarchive)
        data.append(
            (int(urlid), int(cid), url, title, tags, notes, pt, thisarchive,
             int(isheart)))
    return data
def njs(db): """ 新的JS模版. 未登陆的返回一个错误的Token好了。 这样用户选择的时候会出现需要登陆的提示。 模版: {{tags}},{{categorys}} 输入: url,title """ existed = False lasttime = '' tips = '' url = request.GET.get('url', '') #可以用来进行判断预分配的,暂时还没用上 if url: cleanurl = normaliseurl(url) shortid = getshorturlid(url) db.execute('SELECT URL,TIE_TIME FROM URLS WHERE SHORT_URL_ID=%s', (shortid)) for url, tietime in db.getall(): #怕不同URL得到相同的SHORT_URL_ID u = normaliseurl(url) if u == cleanurl: existed = True lasttime = tietime break cats = [] for cid, title in getCategory(db): cats.append('<option value="%s">%s</option>' % (cid, html_escape(title))) #@todo: 尝试去获取最可能的Categorys和Tags. ret = template('jsp', tags="", categorys=''.join(cats), httpdomain=CFG.domain, existed=existed, lasttime=lasttime, tips=tips) response['Content-Type'] = 'application/javascript' return ret
def getUrls(db):
    # Return the list of all valid URLs.
    # Each returned tuple contains:
    # 1. urlid
    # 2. categoryid
    # 3. url
    # 4. title
    # 5. list of tags
    # 6. notes
    # 7. posttime (string, "2013-01-01" format)
    # 8. archive status (int): -1 user has no permission, 0 not submitted,
    #    1 submitted / archiving, 2 queued for submission, 3 archived
    # 9. isheart (int)
    data = []
    iscan = needarchive(db) and archiveapiok(db)
    db.execute('SELECT ID,CATEGORY_ID,URL,TITLE,TAG,NOTES,TIE_TIME,IS_HEART,IS_ARCHIVED,ARCHIVED_OK FROM URLS ORDER BY ID DESC')
    for urlid, cid, url, title, tag, notes, pt, isheart, isarchive, archivedok in db.getall():
        tag = refineTag(tag).lower()
        # Drop empty entries produced by stray commas.
        tags = [x for x in tag.split(',') if x]
        # Escape notes, then normalize CR/LF line breaks.
        notes = "\n ".join(filter(None, re.split("\n|\r", html_escape(notes))))
        if iscan:
            thisarchive = 0  # archiving enabled for this user
        else:
            thisarchive = -1  # user has not enabled Archive
        if int(archivedok) == 1:
            # Submitted and completed: archive results can be viewed.
            thisarchive = 3
        elif int(isarchive) in [1, 2]:
            # Submitted but not finished / queued for submission.
            thisarchive = int(isarchive)
        data.append((int(urlid), int(cid), url, title, tags, notes, pt, thisarchive, int(isheart)))
    return data
def predict(self, tokens, true_label=None, output_file_path=None):
    """Run the ProtoConv model on `tokens` and build an HTML explanation
    of the prediction from the most influential prototypes.

    tokens           -- token-id tensor; tokens[0] is the token sequence
    true_label       -- optional gold label shown next to the prediction
    output_file_path -- if given, the HTML is also written to this file
    Returns the HTML string.
    """
    from models.protoconv.return_wrappers import PrototypeDetailPrediction
    output: PrototypeDetailPrediction = self.model(tokens)
    # Similarity of the input to each prototype, on the log scale.
    similarities = self.local(self.model.dist_to_sim['log'](
        output.min_distances.squeeze(0)))
    # Signed contribution of each prototype to the logit.
    evidence = (similarities * self.fc_weights)
    sorting_indexes = np.argsort(evidence)
    enabled_prototypes_indexes = [
        i for i in sorting_indexes if self.model.enabled_prototypes_mask[i]
    ]
    # Class 1 evidence: strongest positive first; class 0: most negative first.
    positive_protos_idxs = [[1, i] for i in enabled_prototypes_indexes[::-1]
                            if evidence[i] > 0]
    negative_protos_idxs = [[0, i] for i in enabled_prototypes_indexes
                            if evidence[i] < 0]
    sum_of_evidence = {
        0: np.sum(evidence[self.fc_weights < 0]) * -1,
        1: np.sum(evidence[self.fc_weights > 0])
    }
    y_pred: int = int(output.logits > 0)
    words = [self.model.itos[j] for j in list(tokens[0])]
    text = " ".join(words)
    VisRepresentation = namedtuple(
        "VisRepresentation", "patch_text proto_text similarity weight evidence")
    prototypes_vis_per_class = defaultdict(list)
    # Take the top-3 prototypes per class.
    for class_id, prototype_idx in negative_protos_idxs[:3] + positive_protos_idxs[:3]:
        # Token position where this prototype matched best.
        patch_center_id = np.argmin(
            self.local(output.distances)[0, prototype_idx, :])
        if len(words) > self.context:
            # Clamp the center so the context window stays in bounds.
            patch_center_id = min(max(self.context + 1, patch_center_id),
                                  len(words) - 1 - self.context - 1)
        first_index = max(0, patch_center_id - self.context)
        last_index = min(patch_center_id + self.context + 1, len(words) - 1)
        patch_words = words[first_index:last_index]
        patch_words_str = html_escape(' '.join(
            w for w in patch_words if w not in ['<START>', '<END>']))
        prototype_html = self.prototypes[prototype_idx]
        # Flip the sign for class 0 so displayed weights read as positive.
        multiplier = 1 if class_id else -1
        prototypes_vis_per_class[class_id].append(
            VisRepresentation(patch_words_str, prototype_html,
                              similarities[prototype_idx],
                              self.fc_weights[prototype_idx] * multiplier,
                              evidence[prototype_idx] * multiplier))
    y_true_text = ''
    if true_label is not None:
        y_true_text = f', <b>True</b>: {true_label}'
    # NOTE(review): this literal was split across a chunk boundary;
    # reconstructed as implicit adjacent-string concatenation.
    lines = [
        f'<b>Example</b>: {text} <br><br>'
        f'<b>Prediction</b>: {y_pred}{y_true_text}<br>'
    ]
    for class_id, representations in prototypes_vis_per_class.items():
        lines.append(f'Evidence for class {class_id}:')
        lines.append(
            '<table style="width:800px"><tr><td><b>Input</b></td><td><b>Prototype</b></td>'
            '<td><b>Similarity * Weight</b></td></tr>')
        for repr in representations:
            line = f'<tr><td><span">{repr.patch_text} </span> </td> <td> {repr.proto_text} </td> <td>{repr.similarity:.2f} * {repr.weight:.2f} = <b>{repr.evidence:.2f}</b></td></tr>'
            lines[-1] += line
        lines[-1] += '</table>'
        lines[
            -1] += f'Sum of evidence for class {class_id}: <b>{sum_of_evidence[class_id]:.2f}</b><br>'
    text = '<br>'.join(lines)
    if output_file_path is not None:
        with open(output_file_path, 'w') as f:
            f.write(text)
    return text
def do_search(search, identifier, offset, start, end, order_input, nr_results=None):
    """Full-text / category paste search; prints one HTML result page
    (CGI output, Python 2 `print` statement).

    search      -- full-text term matched with MySQL MATCH ... AGAINST
    identifier  -- category filter ("none" means no category filter)
    offset      -- page number; multiplied by SC_PER_PAGE for LIMIT
    start, end  -- optional date bounds, converted via to_unix
    order_input -- passed through get_order to build the ORDER BY column
    nr_results  -- precomputed match count, or None to COUNT(*) here
    """
    # look through database return limit number of pastes, print them
    if identifier == "none":
        identifier = None
    if start is None:
        start_time = None
    else:
        start_time = to_unix(start)
    if end is None:
        end_time = None
    else:
        end_time = to_unix(end)
    if search is None:
        search = ""
    where = ""
    limit = "%(sql_offset)s, %(SC_PER_PAGE)s"
    if identifier is not None:
        # Category search joins identifier with its scrapes.
        table = "identifier INNER JOIN scrape ON identifier.scrape_id = scrape.id"
        select = "scrape_id, scrape_url, full_url, date, paste_key, size, expire, title, syntax, raw"
        order = get_order("scrape_id", order_input)
        where = add_to_where(where, "category=%(identifier)s")
    else:
        table = "scrape"
        order = get_order("id", order_input)
        select = "*"
    # Quote the term for MySQL BOOLEAN MODE phrase matching.
    bool_search = "(\"" + search + "\")"
    if identifier is None:
        where = add_to_where(where, "MATCH(raw) AGAINST(%(search)s IN BOOLEAN MODE)")
    else:
        if search != "":
            where = add_to_where(where, "MATCH(raw) AGAINST(%(search)s IN BOOLEAN MODE)")
    tc = time_constraint(start_time, end_time)
    if tc is not None:
        where = add_to_where(where, tc)
    if nr_results is None:
        # Table/where fragments are built locally; user values go through
        # the parameter dict, not string interpolation.
        query = """SELECT COUNT(*) FROM %s WHERE %s""" % (table, where)
        cursor.execute(query, {'identifier': identifier, 'search': bool_search,
                               'start': start_time, 'end': end_time})
        nr_results = cursor.fetchall()[0][0]
    sql_offset = offset * SC_PER_PAGE
    query = """SELECT %s FROM %s WHERE %s ORDER by %s LIMIT %s""" % (
        select, table, where, order, limit)
    cursor.execute(query, {'identifier': identifier, 'search': bool_search,
                           'start': start_time, 'end': end_time,
                           'sql_offset': sql_offset,
                           'SC_PER_PAGE': SC_PER_PAGE})
    result = cursor.fetchall()
    add_top_part(nr_results, offset, search)
    if nr_results == 0:
        return
    html = """<div class="container">"""
    html += """<table cellpadding="0" cellspacing="0" border="0" class="datatable table table-striped table-bordered" id="results"> <thead> <tr> <th>Title</th> <th>Creation Date</th> <th> Raw Preview</th> </tr></thead>"""
    for (id, scrape_url, full_url, date, paste_key, size, expire, title,
         syntax, raw) in result:
        if title == "":
            title = "Untitled"
        html += "<tr>\n"
        html += """<td> <a href="/cgi-bin/paste_inspection.py?id=%s" target="_blank">%s</a> </td>\n""" % (
            id, title.encode('utf-8'))
        # data-ts carries the raw unix time for client-side sorting.
        html += "<td data-ts=%s> %s </td>\n" % (
            int(date),
            datetime.datetime.fromtimestamp(int(date)).strftime('%d-%m-%Y %H:%M:%S'))
        raw1, raw2 = highlight(html_escape(raw.encode('utf-8')),
                               html_escape(search), identifier=identifier)
        if raw2 is not None:
            html += """<td> <p>%s</p> <p>%s</p> </td>\n""" % (raw1, raw2)
        else:
            html += """<td> <p>%s</p></td>\n""" % raw1
        html += "</tr>\n"
    html += "</table>\n"
    html += "</div> </div> </div>"
    print html
    add_bottom_part(search, offset, nr_results, start, end, order_input)
    # NOTE(review): the footer string appears truncated in this chunk;
    # closing quotes added here so the block stays well-formed.
    print """<div class="footer"> <div class="jumbotron text-center" style="margin-bottom:0">"""
async def profile(conn: Connection, room: Optional[str], user: str, arg: str) -> None:
    # pylint: disable=too-many-locals
    """Send an HTML box with a user's avatar, badges and description.

    arg -- target username; defaults to the sender when empty.
    """
    if arg.strip() == "":
        arg = user
    arg = utils.to_user_id(arg)

    db = Database()
    sql = "SELECT * FROM users WHERE userid = ?"
    body = db.execute(sql, [arg]).fetchone()
    # Bug fix: the old code only guarded the badge lookup with `if body:`
    # but still dereferenced body["avatar"] below, raising TypeError for
    # unknown users. Bail out early instead.
    if not body:
        return
    body = dict(body)
    sql = "SELECT image, label "
    sql += " FROM badges "
    sql += " WHERE userid = ? ORDER BY id"
    body["badges"] = db.execute(sql, [body["userid"]]).fetchall()

    html = "<div>"
    html += ' <div style="display: table-cell; width: 80px; vertical-align: top">'
    html += ' <img src="https://play.pokemonshowdown.com/sprites/{avatar_dir}/{avatar_name}.png"'
    html += ' width="80" height="80">'
    html += " </div>"
    html += ' <div style="display: table-cell; width: 100%; vertical-align: top">'
    html += ' <b style="color: {name_color}">{username}</b><br>{badges}'
    if body["description"] and body["description"].strip() != "":
        html += ' <hr style="margin: 4px 0">'
        html += ' <div style="text-align: justify">{description}</div>'
    html += " </div>"
    html += "</div>"

    # Custom avatars are stored with a leading "#" marker.
    if body["avatar"][0] == "#":
        avatar_dir = "trainers-custom"
        avatar_name = body["avatar"][1:]
    else:
        avatar_dir = "trainers"
        avatar_name = body["avatar"]

    username = body["username"]
    name_color = utils.username_color(utils.to_user_id(username))

    badges = ""
    badge = '<img src="{image}" width="12" height="12" title="{title}"'
    badge += ' style="border: 1px solid; border-radius: 2px; margin: 2px 1px 0 0">'
    for i in body["badges"]:
        badges += badge.format(image=i["image"], title=utils.html_escape(i["label"]))

    description = utils.html_escape(body["description"])

    await conn.send_htmlbox(
        room,
        user,
        html.format(
            avatar_dir=avatar_dir,
            avatar_name=avatar_name,
            name_color=name_color,
            username=username,
            badges=badges,
            description=description,
        ),
    )
def make_latex(formula):
    """Return the formula converted to LaTeX, HTML-escaped and wrapped
    in inline-math dollar delimiters."""
    escaped = html_escape(to_latex(formula))
    return '${}$'.format(escaped)
async def learnset(conn: Connection, room: Optional[str], user: str, arg: str) -> None:
    """Show a pokemon's learnset for a given game version (veekun DB).

    arg -- "pokemon, version" (accents removed, normalized to user ids).
    Replies are in Italian (user-facing strings left untouched).
    """
    args = arg.split(",")
    if len(args) < 2:
        return
    pokemon = utils.to_user_id(utils.remove_accents(args[0].lower()))
    version_group = utils.to_user_id(utils.remove_accents(args[1].lower()))
    db = Database("veekun")
    sql = "SELECT id FROM version_groups WHERE identifier = ?"
    version_group_id = db.execute(sql, [version_group]).fetchone()
    if version_group_id is None:
        # Fall back: the user may have given a single version name
        # instead of a version-group name.
        sql = "SELECT version_group_id FROM versions WHERE identifier = ?"
        version_group_id = db.execute(sql, [version_group]).fetchone()
        if version_group_id is None:
            return
    version_group_id = version_group_id[0]
    sql = """SELECT pokemon_moves.version_group_id,
        pokemon_moves.pokemon_move_method_id,
        (SELECT GROUP_CONCAT(IFNULL(version_names.name, ''), '/')
         FROM versions
         LEFT JOIN version_names ON version_names.version_id = versions.id
         AND version_names.local_language_id = 9
         WHERE versions.version_group_id = pokemon_moves.version_group_id
         ORDER BY versions.id) AS version_group,
        IFNULL(move_names.name, '') AS move_name,
        IFNULL(pokemon_move_method_prose.name, '') AS method_name,
        IFNULL(pokemon_moves.level, 0) AS level,
        IFNULL(item_names.name, '') AS machine
        FROM pokemon_species
        LEFT JOIN pokemon ON pokemon.species_id = pokemon_species.id
        LEFT JOIN pokemon_moves ON pokemon_moves.pokemon_id = pokemon.id
        JOIN version_groups ON version_groups.id = pokemon_moves.version_group_id
        JOIN moves ON moves.id = pokemon_moves.move_id
        LEFT JOIN move_names ON move_names.move_id = moves.id
        AND move_names.local_language_id = 9
        JOIN pokemon_move_methods ON pokemon_move_methods.id = pokemon_moves.pokemon_move_method_id
        LEFT JOIN pokemon_move_method_prose ON pokemon_move_method_prose.pokemon_move_method_id = pokemon_move_methods.id
        AND pokemon_move_method_prose.local_language_id = 9
        LEFT JOIN machines ON machines.move_id = moves.id
        AND pokemon_move_methods.id = 4
        AND machines.version_group_id = version_groups.id
        LEFT JOIN item_names ON item_names.item_id = machines.item_id
        AND item_names.local_language_id = 9
        WHERE pokemon_species.identifier = ?
        AND version_groups.id = ?
        ORDER BY pokemon_moves.pokemon_move_method_id, pokemon_moves.level,
        machines.machine_number, move_names.name"""
    html = ""
    # Rows arrive grouped by learn method; emit one <details> block per
    # method, switching when the method id changes.
    current_move_method_id = 0
    for row in db.execute(sql, [pokemon, version_group_id]):
        if current_move_method_id != row["pokemon_move_method_id"]:
            if current_move_method_id != 0:
                # Close the previous method's table before opening a new one.
                html += "</tbody></table>"
                html += "</details>"
            html += (
                "<details><summary><b><big>"
                + utils.html_escape(row["method_name"])
                + "</big></b></summary>"
            )
            html += '<table style="margin: 5px 0"><tbody>'
            html += "<tr>"
            html += " <th>Move</th>"
            if row["pokemon_move_method_id"] == 1:  # level-up
                html += " <th>Level</th>"
            elif row["pokemon_move_method_id"] == 2:  # egg
                pass
            elif row["pokemon_move_method_id"] == 4:  # machine
                html += " <th>Machine</th>"
            html += "</tr>"
            current_move_method_id = row["pokemon_move_method_id"]
        html += "<tr>"
        html += " <td>" + utils.html_escape(row["move_name"]) + "</td>"
        if current_move_method_id == 1:  # level-up
            html += (
                ' <td style="text-align: right">'
                + utils.html_escape(str(row["level"]))
                + "</td>"
            )
        elif current_move_method_id == 2:  # egg
            pass
        elif current_move_method_id == 4:  # machine
            html += " <td>" + utils.html_escape(row["machine"]) + "</td>"
        html += "</tr>"
    if current_move_method_id != 0:
        # Close the trailing method block (loop produced at least one row).
        html += "</tbody></table>"
        html += "</details>"
    if not html:
        await conn.send_reply(room, user, "Nessun dato")
        return
    await conn.send_htmlbox(room, user, '<div class="ladder">' + html + "</div>")
def to_html_quotebox(quote: str) -> str:
    """Generates HTML that shows a quote.

    Args:
        quote (str): Raw quote string, added through `.addquote`.

    Raises:
        BaseException: quote is empty.

    Returns:
        str: htmlbox.
    """
    if not quote:
        # This shouldn't happen because empty quotes are ignored by `.addquote`.
        raise BaseException("Trying to create quotebox for empty quote.")

    # Valid timestamp formats: [xx:xx], [xx:xx:xx]
    timestamp_regex = r"(\[\d{2}:\d{2}(?::\d{2})?\])"
    splitted = re.split(timestamp_regex, quote)
    # re.split with a capturing group: even indices are the text between
    # timestamps, odd indices are the timestamps themselves.

    # Return the quote unparsed if it has a custom format, aka one of these conditions
    # applies:
    # (1) Quote doesn't start with a timestamp.
    # (2) Quote only has timestamps.
    if splitted[0] or not any(part.lstrip() for part in splitted[::2]):
        return utils.linkify(quote)

    lines: List[str] = []
    for timestamp, phrase in zip(splitted[1::2], splitted[2::2]):
        # Wrap every line in a <div class="chat"></div> and if it is a regular chat
        # message format it accordingly.
        phrase = phrase.lstrip()
        if not phrase:
            # Timestamp with an empty phrase.
            # Append the timestamp to the previous phrase, it was probably part of it.
            if not lines:
                lines.append(timestamp)
            else:
                lines[-1] += timestamp
        elif ": " in phrase and phrase[0] != "(":
            # phrase is a chat message.
            # Example: "[03:56] @Plat0: Hi"
            # userstring: Username, optionally preceded by its rank.
            # body: Content of the message sent by the user.
            userstring, body = phrase.split(": ", 1)

            # rank: Character rank or "" (not " ") in case of a regular user.
            # username: userstring variable stripped of the character rank.
            if userstring[0] not in string.ascii_letters + string.digits:
                rank = userstring[0]
                username = userstring[1:]
            else:
                rank = ""
                username = userstring

            # Escape special characters: needs to be done last.
            # Timestamp doesn't need to be escaped.
            rank = utils.html_escape(rank)
            username = utils.html_escape(username)
            body = utils.linkify(body)

            lines.append(f"<small>{timestamp} {rank}</small>"
                         f"<username>{username}:</username> "
                         f"<em>{body}</em>")
        else:
            # phrase is a PS message that may span over multiple lines.
            # Example: "[14:20:43] (plat0 forcibly ended a tournament.)"
            # Text contained within round parentheses is considered a separated line.
            # This is true for most use-cases but it's still heuristic.
            sublines = re.split(r"(\(.*\))", phrase)
            sublines = [utils.linkify(s) for s in sublines if s.strip()]
            # The timestamp is written only on the first subline.
            sublines[0] = f"<small>{timestamp}</small> <em>{sublines[0]}</em>"
            lines += sublines

    # Merge lines
    html = '<div class="message-log" style="display: inline-block">'
    for line in lines:
        html += f'<div class="chat">{line}</div>'
    html += "</div>"
    return html