def notify(self, item: FeedItem):
    """Push a webhook notification for *item*.

    New items are sent as plain text; changed items are sent as a
    markdown rendering of the diff between old and new content.
    Raises Exception when ``self.message_type`` is unrecognised.
    """
    if item.is_new:
        body = html_to_text(item.full_content)
    else:
        # Changed item: show a readable diff of old vs. new content.
        before = html_to_text(item.old_full_content)
        after = html_to_text(item.full_content)
        body = html_diff_to_markdown(html_diff2(before, after))
    body = shorten(body, 8000)
    link = item.link
    title = item.title
    is_new_text = 'New' if item.is_new else 'Changed'
    if self.message_type == "text":
        send_text_msg(self.url, f"【{is_new_text}】{body}.... {link}")
    elif self.message_type == "actionCard":
        logger.info(f"正在发送 {body}.... {link}")
        send_action_card(
            self.url,
            title=f"【{is_new_text}】 {title}",
            text=f"【{is_new_text}】 {body} {link}",
            btnOrientation=0,
            btns=[{"title": "查看原文", "actionURL": link}],
        )
    else:
        raise Exception("Unknown Message Type " + self.message_type)
def onMediaReceived(self, messageId, jid, preview, url, size, receiptRequested, isBroadcast):
    """Relay a received media URL to the XMPP side.

    Sends a shortened link to the sender's buddy name and acknowledges
    the message when a receipt was requested.
    """
    sender = jid.split("@")[0]
    self.logger.info("Media received from %s: %s", sender, url)
    self.sendMessageToXMPP(sender, utils.shorten(url))
    if receiptRequested:
        self.call("message_ack", (jid, messageId))
def parse_item(item_tree):
    """Parse one RSS <item> element tree into a plain dict.

    Returns a dict with optional keys 'title', 'description' and 'url'.
    The URL comes from <guid> when it is marked as (or looks like) a
    permalink, falling back to <link>. When <title> is absent, a
    shortened, markup-stripped form of the description is used.
    """
    item_dict = {}
    title_subtree = item_tree.find('title')
    if title_subtree is not None:
        item_dict['title'] = utils.strip_markup(title_subtree.text.strip())
    desc_subtree = item_tree.find('description')
    if desc_subtree is not None:
        item_dict['description'] = desc_subtree.text.strip()
    if 'title' not in item_dict:
        # NOTE(review): raises KeyError when both <title> and
        # <description> are missing -- original behavior, kept.
        item_dict['title'] = utils.shorten(
            utils.strip_markup(item_dict['description']), 30)
    guid_subtree = item_tree.find('guid')
    if guid_subtree is not None:
        text = guid_subtree.text.strip()
        # BUG FIX: isPermaLink is an XML *string* attribute ("true"/"false");
        # the old comparison against the boolean True was always False, so a
        # guid explicitly marked as a permalink was never trusted directly.
        # (Per the RSS 2.0 spec the attribute defaults to true when absent;
        # we conservatively keep the is_url() fallback for that case.)
        marked_permalink = (
            guid_subtree.attrib.get('isPermaLink', '').lower() == 'true')
        if marked_permalink:
            item_dict['url'] = text
        elif utils.is_url(text):
            item_dict['url'] = text
    if 'url' not in item_dict:
        link_subtree = item_tree.find('link')
        if link_subtree is not None:
            item_dict['url'] = link_subtree.text.strip()
    return item_dict
def parse_item(item_tree):
    """Parse one RSS <item> element tree into a plain dict.

    Returns a dict with optional keys 'title', 'description' and 'url'.
    The URL comes from <guid> when it is marked as (or looks like) a
    permalink, falling back to <link>. When <title> is absent, a
    shortened, markup-stripped form of the description is used.
    """
    item_dict = {}
    title_subtree = item_tree.find('title')
    if title_subtree is not None:
        item_dict['title'] = utils.strip_markup(title_subtree.text.strip())
    desc_subtree = item_tree.find('description')
    if desc_subtree is not None:
        item_dict['description'] = desc_subtree.text.strip()
    if 'title' not in item_dict:
        # NOTE(review): raises KeyError when both <title> and
        # <description> are missing -- original behavior, kept.
        item_dict['title'] = utils.shorten(
            utils.strip_markup(item_dict['description']), 30)
    guid_subtree = item_tree.find('guid')
    if guid_subtree is not None:
        text = guid_subtree.text.strip()
        # BUG FIX: isPermaLink is an XML *string* attribute ("true"/"false");
        # the old comparison against the boolean True was always False, so a
        # guid explicitly marked as a permalink was never trusted directly.
        # (Per the RSS 2.0 spec the attribute defaults to true when absent;
        # we conservatively keep the is_url() fallback for that case.)
        marked_permalink = (
            guid_subtree.attrib.get('isPermaLink', '').lower() == 'true')
        if marked_permalink:
            item_dict['url'] = text
        elif utils.is_url(text):
            item_dict['url'] = text
    if 'url' not in item_dict:
        link_subtree = item_tree.find('link')
        if link_subtree is not None:
            item_dict['url'] = link_subtree.text.strip()
    return item_dict
def onLocationReceived(self, messageId, jid, name, preview, latitude, longitude, receiptRequested, isBroadcast):
    """Relay a received location as a shortened Google Maps link."""
    sender = jid.split("@")[0]
    self.logger.info("Location received from %s: %s, %s", sender, latitude, longitude)
    query = urllib.urlencode({"q": "%s %s" % (latitude, longitude)})
    maps_url = "http://maps.google.de?%s" % query
    self.sendMessageToXMPP(sender, utils.shorten(maps_url))
    if receiptRequested:
        self.call("message_ack", (jid, messageId))
def fdupef(d): if d[-1:] != '/':d = d+'/' print "Looking in %s" %d files = [] print "Gathering Files..." for aaa, bbb, ccc in os.walk(d): try: for i in ccc: if os.path.isfile(d+i): files.append(i) except KeyboardInterrupt: break names = [] hashes = [] for i in files: try: k = hashlib.md5(open(d+i, 'rb').read()).hexdigest() print "%d/%d - %s - %s"%(files.index(i)+1, len(files), utils.shorten(i, 10), k) names.append(i) hashes.append(k) except: print "%d/%d - %s - %s"%(files.index(i)+1, len(files), utils.shorten(i, 10), "ERROR READING FILE.") dupes = utils.dupecheck(hashes) nind =[hashes.index(i) for i in dupes] newnames = [] for i in nind: newnames.append(names[i]) full = zip(dupes, newnames) def write(named): with open(named+".txt", 'w') as dupesoc: for i in full: dupesoc.write(' -> '.join(i)+'\n') dupesoc.close() print "Found %d Duplicate(s):\n%s" % (len(full), '\n'.join(['--> '+i[1] for i in full])) r = raw_input("Save to text file? (y/n): ").lower() confirming = True while confirming: if r == 'y': write('Duplicates') confirming = False if r == 'n': confirming = False
def parseBiggerlabCourseFeedback(blob):
    """Map a raw course-feedback form record onto descriptive field names.

    *blob* is a dict keyed by "field_N" form identifiers. Attachment-link
    fields are run through shorten() after stripping a trailing
    "&download" flag.
    """
    def deDownload(linkArr):
        # Some links end with "&download", which forces a download instead
        # of a preview, so strip that suffix; an empty list yields "".
        if len(linkArr) == 0:
            return ""
        link = linkArr[0]
        return link[:-9] if link.endswith("&download") else link

    info = {}
    info["reasonFilling"] = blob["field_5"]                            # reason for filling the form
    info["teacherName"] = blob["field_4"]                              # teacher name
    info["teacherComment"] = blob["field_19"]                          # teacher's evaluation
    info["studentName"] = blob["field_6"] + blob["field_31"]           # student name
    info["courseName"] = blob["field_8"]                               # course name
    info["courseStartDate"] = blob["field_9"]                          # course start date
    info["courseEndDate"] = blob["field_12"]                           # course end date
    info["courseContent"] = blob["field_13"]                           # content covered so far
    info["courseCapturelink"] = blob["field_32"]                       # class recording link
    info["projectScreenshot"] = shorten(deDownload(blob["field_18"]))  # project screenshot
    info["rateAttendance"] = blob["field_14"]                          # class participation
    info["rateUnderstanding"] = blob["field_15"]                       # understanding of material
    info["rateAssignmentCompletion"] = blob["field_16"]                # in-class task completion
    info["rateGeneral"] = blob["field_17"]                             # overall rating
    info["reasonChanging"] = blob["field_20"]                          # handover reason
    info["nextTeacher"] = blob["field_21"]                             # next teacher's name
    info["classType"] = blob["field_23"]                               # class format
    info["classStartTime"] = blob["field_10"]                          # class start time
    info["classEndTime"] = blob["field_11"]                            # class end time
    info["hoursTaught"] = blob["field_25"]                             # hours taught so far
    info["awardReceived"] = blob["field_33"]                           # whether the student earned honors
    info["awardType"] = blob["field_37"]                               # honor type
    info["projectDescription"] = blob["field_42"]                      # project description / content learned
    info["teacherNotes"] = blob["field_46"]                            # teacher's remarks on the student
    info["awardEvidence"] = shorten(deDownload(blob["field_40"]))      # competition-award proof screenshot
    info["competitionName"] = blob["field_34"]                         # competition name
    info["prizeReceived"] = blob["field_35"]                           # prize received
    info["projectPublishEvidence"] = shorten(deDownload(blob["field_47"]))  # project-publication proof screenshot
    info["copyrightEvidence"] = shorten(deDownload(blob["field_48"]))  # copyright proof screenshot
    info["projectLink"] = blob["field_43"]                             # project link
    info["sourceCode"] = shorten(deDownload(blob["field_36"]))         # source-code file
    info["admissionEvidence"] = shorten(deDownload(blob["field_49"]))  # admission proof screenshot
    info["schoolName"] = blob["field_44"]                              # school name
    info["profession"] = blob["field_45"]                              # major
    info["awardOthers"] = shorten(deDownload(blob["field_50"]))        # other honor proof screenshot
    return info
def onLocationReceived(self, messageId, jid, name, preview, latitude, longitude, receiptRequested, isBroadcast):
    """Relay a received location as a shortened Google Maps link."""
    buddy_name = jid.split("@")[0]
    self.logger.info("Location received from %s: %s, %s", buddy_name, latitude, longitude)
    coords = "%s %s" % (latitude, longitude)
    url = "http://maps.google.de?%s" % urllib.urlencode({"q": coords})
    self.sendMessageToXMPP(buddy_name, utils.shorten(url))
    if receiptRequested:
        self.call("message_ack", (jid, messageId))
def filter_story(story, display_tweets):
    """Trim a story dict in place for display and return it.

    Caps URLs and photos at 16 entries and the tag cloud at 200, adds a
    shortened display link per URL and a polarity "face".
    NOTE(review): *display_tweets* is accepted but unused here -- kept
    for interface compatibility.
    """
    trimmed_urls = story["URLs"][:16]
    for entry in trimmed_urls:
        entry["display_url"] = shorten(entry["link"], 45)
    story["urls"] = trimmed_urls
    del story["URLs"]
    story["tagCloud"] = story["tagCloud"][:200]
    story["photos"] = story["photos"][:16]
    story["polarityface"] = display_polarity(story["polarity"])
    return story
def get_next_event_text(calendar, after=None, include_current=None, tz=None, verbose=True):
    """Build the human-readable response to the !next command.

    The tz parameter can override the timezone used to display the event
    times; it may be an actual timezone object or a timezone name string.
    Defaults to the configured timezone. Returns an error string for an
    unknown timezone name.
    """
    if after is None:
        after = datetime.datetime.now(datetime.timezone.utc)
    if not tz:
        tz = config['timezone']
    elif isinstance(tz, str):
        tz = tz.strip()
        try:
            tz = utils.get_timezone(tz)
        except pytz.exceptions.UnknownTimeZoneError:
            return "Unknown timezone: %s" % tz

    events = get_next_event(calendar, after=after, include_current=include_current)
    if not events:
        return "There don't seem to be any upcoming scheduled streams"

    parts = []
    for idx, event in enumerate(events):
        # Events sharing a start time are listed together; the time is
        # shown only once, after the last event of such a group.
        last_of_group = (idx == len(events) - 1
                         or event['start'] != events[idx + 1]['start'])
        if not last_of_group:
            parts.append(event['title'])
            continue
        when = event['start'].astimezone(tz).strftime(DISPLAY_FORMAT)
        if not verbose:
            parts.append("%s at %s" % (event['title'], when))
            continue
        if event['location'] is not None:
            title = "%(title)s (%(location)s)" % event
        else:
            title = event['title']
        if event['start'] < after:
            relative = utils.nice_duration(after - event['start'], 1) + " ago"
        else:
            relative = utils.nice_duration(event['start'] - after, 1) + " from now"
        parts.append("%s at %s (%s)" % (title, when, relative))

    response = ', '.join(parts)
    if verbose:
        if calendar == CALENDAR_LRL:
            response = "Next scheduled stream: " + response
        elif calendar == CALENDAR_FAN:
            response = "Next scheduled fan stream: " + response
    return utils.shorten(response, 450)  # For safety
def _import(self, token = None):
    """Run the contact import, obtaining an auth token first if needed."""
    if token:
        # Token supplied directly: import right away.
        self.__do_import(token)
        self.session.updateRoster()
        return
    # No token yet: hand the user an auth URL and wait for the token file.
    token_url = self.google.getTokenUrl("http://whatsapp.0l.de/auth.py")
    auth_url = "http://whatsapp.0l.de/auth.py?number=%s&auth_url=%s" % (
        self.session.legacyName, urllib.quote(token_url))
    short_url = utils.shorten(auth_url)
    self.send("please visit this url to auth: %s" % short_url)
    self.send("waiting for authorization...")
    token = self.__get_token(TOKEN_FILE)
    if token:
        self.send("got token: %s" % token)
        self.__do_import(token)
        self.session.updateRoster()
    else:
        self.send("timeout! please use \"\\import [token]\"")
def shorten_url():
    """Create a short code for the URL given in the JSON request body.

    An optional caller-supplied 'shortcode' is honoured when free and
    valid; otherwise a code is generated. Responds 201 with the code on
    success.
    NOTE(review): a non-POST request falls through and returns None --
    confirm the route registration only allows POST.
    """
    if not request.json:
        return bad_request('Url must be provided in json format.', 400)
    if 'url' not in request.json:
        return bad_request('Url parameter not found.', 400)
    if request.method == 'POST':
        payload = request.json
        url = payload['url']
        if not url_valid(url):
            return bad_request('Provided url is not valid.', 400)
        if 'shortcode' in payload:
            # Reject a custom shortcode that is already taken.
            try:
                existing = Shortcodes.query.filter(
                    Shortcodes.shortcode == payload['shortcode']).first()
                if existing:
                    return bad_request('Shortcode already in used', 409)
            except Exception:
                return jsonify({'error': 'Service is temporarily unavailable'}), 500
            else:
                shortcode = payload['shortcode']
        else:
            shortcode = shorten(url)
        if not short_code_valid(shortcode):
            return bad_request('Shortcode invalide', 412)
        record = Shortcodes(url=url, shortcode=shortcode)
        try:
            db.session.add(record)
            db.session.commit()
        except sqlalchemy.exc.IntegrityError:
            return bad_request('url already exists', 400)
        except Exception:
            return jsonify({'error': 'Service is temporarily unavailable'}), 500
        else:
            return jsonify({'shortened_url': shortcode}), 201
for b, n0, txt in self.blocks(blocks): for c, n, t in get_cards(txt, skipcomments=skipcomments): if t == 'card': t = b yield Card(lines=c, position=n0 + n, type=t) if __name__ == '__main__': from sys import argv import utils input = MIP(argv[1]) print 'Start cycling cards', datetime.now().isoformat() for c in input.cards(): print c.position pass print 'End cycling cards', datetime.now().isoformat() exit(0) # print blocs for b, l, txt in input.blocks(): print b, l, utils.shorten(repr(txt)) # split cards to parts for c in input.cards(blocks='csd', skipcomments=True): print '*' * 60 print c.position print c.content() print c.parts()
# Read ECG data from .asc file.
# FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24; the
# documented replacement for this alias is the builtin int.
ecg_data = read_csv(os.path.join(MEDIANS_PATH, ecg_file),
                    delimiter=' ',
                    transpose=True,
                    skip_header=False,
                    dtype=int)

# Normalize ECG data between -1 and 1
ecg_normalized = normalize(ecg_data)

# Scale normalized ECG data between -Y_MAX and Y_MAX
ecg_scaled = np.array(ecg_normalized * Y_MAX, dtype=int)

# Reduce the length of ECG data by dropping every other element
ecg_reduced = shorten(ecg_scaled)

# Remove the first n values to fit within the X_MAX limit
if len(ecg_reduced[0]) > X_MAX:
    ecg_reduced = [
        ecg_values[len(ecg_values) - X_MAX:] for ecg_values in ecg_reduced
    ]

# Generate ECG plot, then reload it as a grayscale pixel array
generate_ecg_plot(ecg_data, ecg_id, file_path)
plot_image = Image.open(file_path).convert('L')
plot_image = np.array(plot_image)
# plot_image = plot_image[~np.all(plot_image == 255, axis=1)]
# Discord bot message handler (discord.py-style async callback).
#
# Routing overview, taken from the code below:
#   * Ignores the bot's own messages; guild messages are accepted only
#     from HV_SERVER or TEST_SERVER (DMs pass through).
#   * Scans every message for HV item links via scanMsg() and posts
#     preview embeds/text, honouring the per-user "pretty_preview"
#     preference loaded from ./cache/preferences.json; sent responses are
#     recorded in MSG_LOG (keyed by message id) and the original message
#     gets a refresh reaction.
#   * "h!limit"                         -- set CONFIG["EMBED_LIMIT"] and persist it.
#   * "!prettyon"/"!fancyon",
#     "!prettyoff"/"!fancyoff"          -- toggle the pretty-preview preference.
#   * "!help"/"!!help"                  -- print the command summary.
#   * "!auction"/"!auc"                 -- utils.superSearch with keyword filters
#                                          (date/rare/norare/min/max/nomerge);
#                                          output paginated via utils.breakMessage,
#                                          capped at 2 pages inside a guild.
#   * "!lwinners"/"!lwin"/"!luser"      -- utils.searchLottoWinners.
#   * "!litem"/"!lit"                   -- utils.searchLottoItems (1 page cap in guilds).
#   * "!kauc"                           -- utils.ksearch (kedama auction; 1 page cap).
#   In-guild searches outside HV_BOT_CH on HV_SERVER are ignored.
#
# NOTE(review): the body below is preserved verbatim from the
# whitespace-mangled source (one long physical line per original chunk);
# the nesting of the original multi-line form cannot be reconstructed
# unambiguously, so no statement has been altered.
async def on_message(message): HV_SERVER = 603053441157169155 HV_BOT_CH = 634255830094446592 TEST_SERVER = 395741640372912138 message.content = message.content.lower() # Allow only HV server and exclude self if (message.author.id == client.user.id) or (message.guild and (not message.guild.id == HV_SERVER and not message.guild.id == TEST_SERVER)): return print(message.author.name, message.content) ### Item previews --- id = str(message.author.id) prefs = json.load(open("./cache/preferences.json")) pp = False if id in prefs and prefs[id]["pretty_preview"]: pp = True # Look thru message for HV links and generate preview embeds, text = scanMsg(message.content, message, valType=2, prettyPrint=pp) if text or embeds: print(message.content, f"{len(embeds)} embeds", text) MSG_LOG[message.id] = { "message": message, "type": 2, "responses_text": [], "responses_embed": [] } if text: msg = await message.channel.send(content=text) MSG_LOG[message.id]["responses_text"].append(msg) for embed in embeds: msg = await message.channel.send(embed=embed) MSG_LOG[message.id]["responses_embed"].append(msg) await message.add_reaction("🔄") ### --- ### Setting commands --- # Set preview image limit if message.content.startswith("h!limit"): split = message.content.split() try: CONFIG["EMBED_LIMIT"] = int(split[1]) updateConfigFile() await message.channel.send( f"Max equipment embeds generated per message set to {CONFIG['EMBED_LIMIT']}" ) except Exception as e: await message.channel.send(str(e)) # Set preview preferences if message.content.startswith("!prettyon") or message.content.startswith( "!fancyon"): try: if id not in prefs: prefs[id] = CONFIG['BLANK_PREFS'] prefs[id]['name'] = message.author.name prefs[id]['pretty_preview'] = True await message.channel.send( f"Current preferences:\n{json.dumps(prefs[id],indent=2)}") with open("./cache/preferences.json", 'w') as outfile: outfile.write(json.dumps(prefs, indent=2)) except Exception as e: await message.channel.send(str(e)) if 
message.content.startswith("!prettyoff") or message.content.startswith( "!fancyoff"): try: if id not in prefs: prefs[id] = CONFIG['BLANK_PREFS'] prefs[id]['name'] = message.author.name prefs[id]['pretty_preview'] = False await message.channel.send( f"Current preferences:\n{json.dumps(prefs[id],indent=2)}") with open("./cache/preferences.json", 'w') as outfile: outfile.write(json.dumps(prefs, indent=2)) except Exception as e: await message.channel.send(str(e)) ### --- ### Begin command dump if message.content.startswith("!help") or message.content.startswith( "!!help"): await message.channel.send( "**`!auction` `{query}`** - Returns a list of matching items and prices from super's + kedama's auction." + #"\n**`!wts` `{query}`** - Returns a list of matching items and prices from the WTS forum." + #"\n**`!wtb` `{query}`** - Returns a list of matching items and prices from the WTB forum." + "\n**`!litem` `{query}`** - Lotto items." + "\n**`!lwinner` `{query}`** - Lotto winners." + "\n**`!prettyon`** - Fancy equip previews by default." + "\n**`!prettyyoff`** - Ugly equip previews by default." + "\n\nFor equip previews, prefixing the link with ! will switch between the pretty modes. Reacting to the original links with :poop: will also delete the previews." 
+ "\n\nFor searches, various keywords / params I'll document later: `nolink`, `link`, `date2019`, `min6969k`, `max10m`, `rare`, `norare`" ) if message.content.startswith("!auction") or message.content.startswith( "!auc"): keywords = { "date": None, "rare": None, "norare": None, "min": None, "max": None, "nomerge": None } aliases = {"date": ["year"], "date20": ["20"]} for key in aliases: for x in aliases[key]: message.content = message.content.replace(" " + x, " " + key) split = message.content.lower().split()[1:] for key in keywords: matches = [x for x in split if x.startswith(key)] if matches: keywords[key] = matches[0].replace(key, "") if keywords[key] == "": keywords[key] = True [split.remove(x) for x in matches] if not split: return if message.guild and message.channel.id != HV_BOT_CH and message.guild.id == HV_SERVER: return #async with message.channel.typing(): nolink = "link" not in message.content # Year filter year = None if keywords["date"]: try: year = int(keywords["date"]) except: await message.channel.send( f"Warning: {keywords['date']} is not a valid year for `date`." ) # Rare filter rare = None if keywords["rare"]: rare = True elif keywords["norare"]: rare = False # Min filter def toint(x): ix = (x.lower().replace("k", "000").replace("m", "000000")) print(ix) if "." in x: numDec = len( str(x).replace("k", "").replace("m", "").split(".")[1]) print(numDec) ix = ix[:-numDec].replace(".", "") print(ix) if not ix: ix = 0 try: return int(ix) except: return 0 min = None if keywords["min"]: try: min = toint(keywords["min"]) except: await message.channel.send( f"Warning: {keywords['min']} is not a valid number for `min`." ) # Min filter max = None if keywords["max"]: try: max = toint(keywords["max"]) except: await message.channel.send( f"Warning: {keywords['max']} is not a valid number for `max`." 
) print(year, rare, min, max) h = ["Price", "Level", "Percentiles", "# Auction / Date"] if not nolink: #split.remove("nolink") split.remove("link") #headers.remove("Link") result = utils.superSearch(" ".join(split), year=year, rare=rare, min=min, max=max, nomerge=keywords["nomerge"]) if not result: return await message.channel.send("No results found.") links = {} slinks = {} headers = {} for item in result: headers[item] = h if "Peerless" in item: headers[item] = h[:2] + h[3:] result[item] = [x[:2] + x[3:] for x in result[item]] links[item] = [(result[item][x][-2], result[item][x][-1]) for x in range(len(result[item]))] slinks[item] = [utils.shorten(x[0], x[1]) for x in links[item]] result[item] = [x[:-2] for x in result[item]] if nolink: text = "```py\n" else: text = "" for item in result: if nolink: text += "@ " + item + "\n" else: text += "**" + item + "**\n" if nolink: text += utils.pprint(result[item], headers=headers[item]) else: text += utils.pprint(result[item], headers=headers[item], quoteWrap=True, links=slinks[item]) text += "\n\n" text = text[:-2] if nolink: text += "```" #print(text) if nolink: text = utils.breakMessage(text, codeblock=True, lang="py") else: text = utils.breakMessage(text, codeblock=False) print(json.dumps(text, indent=2)) lim = 2 if message.guild: tx = text[:lim] else: tx = text for msg in tx: #print("msg",msg) if not msg: continue await message.channel.send(msg) await asyncio.sleep(.5) if message.guild and len(text) > lim: await message.channel.send( f"{len(text) - lim} additional pages omitted.") if message.content.startswith("!lwinners") or message.content.startswith( "!lwin") or message.content.startswith("!luser"): split = message.content.lower().split()[1:] if not split: return if message.guild and message.channel.id != HV_BOT_CH and message.guild.id == HV_SERVER: return result, stats = utils.searchLottoWinners(" ".join(split)) if not result: return await message.channel.send("No results found.") statLst = [] for key in stats: 
statLst.append([key] + stats[key]) print(statLst) text = "```py\n" + utils.pprint( statLst, headers=["Item", "Total Count", "Wins"]) + "```\n" text += "```py\n" + utils.pprint( result, headers=["Prize", "Ticket Pool", "# Lotto / Date" ]) + "\n```" print(text) text = utils.breakMessage(text, codeblock=True, lang="py") lim = 2 if message.guild: tx = text[:lim] else: tx = text for msg in tx: if not msg: continue await message.channel.send(msg) await asyncio.sleep(.5) if message.guild and len(text) > lim: await message.channel.send( f"{len(text) - lim} additional pages omitted.") if message.content.startswith("!litem") or message.content.startswith( "!lit"): split = message.content.lower().split()[1:] if not split: return if message.guild and message.channel.id != HV_BOT_CH and message.guild.id == HV_SERVER: return result = utils.searchLottoItems(" ".join(split)) if not result: return await message.channel.send("No results found.") text = "```py\n" + utils.pprint( result, headers=["Prize", "Winner", "Ticket Pool", "# Lotto / Date" ]) + "\n```" print(text) text = utils.breakMessage(text, codeblock=True, lang="py") lim = 1 if message.guild: tx = text[:lim] else: tx = text for msg in tx: if not msg: continue await message.channel.send(msg) await asyncio.sleep(.5) if message.guild and len(text) > lim: await message.channel.send( f"{len(text) - lim} additional pages omitted.") if message.content.startswith("!kauc"): split = message.content.lower().split()[1:] if not split: return if message.guild and message.channel.id != HV_BOT_CH and message.guild.id == HV_SERVER: return result = utils.ksearch(" ".join(split)) if not result: return await message.channel.send("No results found.") print(result) tmp = [] for item in result: text = "```py\n" text += "@ " + item + "\n" text += utils.pprint(result[item], headers=["Level", "Price", "# Auction"]) text += "\n```" tmp.append(text) print(text) text = "\n".join(tmp) text = utils.breakMessage(text, codeblock=True, lang="py") lim = 1 
if message.guild: tx = text[:lim] else: tx = text for msg in tx: # print("msg",msg) if not msg: continue await message.channel.send(msg) await asyncio.sleep(.5) if message.guild and len(text) > lim: await message.channel.send( f"{len(text) - lim} additional pages omitted.")
def shorten(name):
    """URL shortener handler.

    Thin wrapper that delegates straight to ``utils.shorten``.
    """
    shortened = utils.shorten(name)
    return shortened
def request(self, req): GET = (req.method == 'GET') POST = (req.method == 'POST') # routing if GET and req.url_path == '/': # serve the root document regardless of the iframes state if self.state == 'open': self.state = 'document_requested' data = ''.join(self.resources.get(None, [])) logger.debug("getting iframe root document: %s", repr((self.state, utils.shorten(data)))) msg = webkitwindow.Message(headers={ 'Cache-Control': 'no-cache', 'Content-Type': 'text/html; charset=utf-8' }, body=data) req.respond(status=(200, 'OK'), message=msg, streaming=(self.state == 'document_requested')) self._root_document = msg # use _root_document.write/.close when streaming # static and iframe-specific resources elif GET and req.url_path in self.static_resources: req.found_resource(self.static_resources[req.url_path], module_name='schirm.resources', modify_fn=lambda s: s % { 'websocket_uri': self.websocket_uri, 'comm_uri': self.comm_path }) elif GET and req.url_path in self.resources: req.found(**self.resources[req.url_path]) elif POST and req.url_path == self.comm_path: # receive commands from the iframe via plain plain HTTP req_bad = lambda msg="": req.respond( (400, 'Bad Request'), webkitwindow.Message(body=msg)) req_ok = lambda: req.respond((200, 'OK'), webkitwindow.Message()) try: data = json.loads(req.message.body) except ValueError, e: req_bad("Invalid JSON: %r" % str(e)) return None if isinstance(data, dict): cmd = data.get('command') if cmd == 'resize': if data.get('height') == 'fullscreen': height = 'fullscreen' else: # resize iframe to height try: height = int(data.get('height')) except: height = 'fullscreen' req_ok() return { 'name': 'iframe_resize', 'iframe_id': self.id, 'height': height } elif cmd == 'control-c': req_ok() return {u'name': u'keypress', u'key': 'control-c'} elif cmd == 'control-d': req_ok() return {u'name': u'keypress', u'key': 'control-d'} elif cmd == 'control-z': req_ok() return {u'name': u'keypress', u'key': 'control-z'} elif cmd == 'ping': # this 
command allows the iframe to check whether # its alive when not, Iframes.request will respond # with a 404 notfound upon accessing comm_path req_ok() else: req_bad("Invalid command: %r" % (cmd, )) else: req_bad("Not a dictionary: %r" % (data, ))
def request(self, req): GET = (req.method == 'GET') POST = (req.method == 'POST') # routing if GET and req.url_path == '/': # serve the root document regardless of the iframes state if self.state == 'open': self.state = 'document_requested' data = ''.join(self.resources.get(None, [])) logger.debug("getting iframe root document: %s", repr((self.state, utils.shorten(data)))) msg = webkitwindow.Message(headers={'Cache-Control': 'no-cache', 'Content-Type': 'text/html; charset=utf-8'}, body=data) req.respond(status=(200, 'OK'), message=msg, streaming=(self.state == 'document_requested')) self._root_document = msg # use _root_document.write/.close when streaming # static and iframe-specific resources elif GET and req.url_path in self.static_resources: req.found_resource(self.static_resources[req.url_path], module_name='schirm.resources', modify_fn=lambda s: s % {'websocket_uri': self.websocket_uri, 'comm_uri': self.comm_path}) elif GET and req.url_path in self.resources: req.found(**self.resources[req.url_path]) elif POST and req.url_path == self.comm_path: # receive commands from the iframe via plain plain HTTP req_bad = lambda msg="": req.respond((400, 'Bad Request'), webkitwindow.Message(body=msg)) req_ok = lambda: req.respond((200, 'OK'), webkitwindow.Message()) try: data = json.loads(req.message.body) except ValueError, e: req_bad("Invalid JSON: %r" % str(e)) return None if isinstance(data, dict): cmd = data.get('command') if cmd == 'resize': if data.get('height') == 'fullscreen': height = 'fullscreen' else: # resize iframe to height try: height = int(data.get('height')) except: height = 'fullscreen' req_ok() return {'name': 'iframe_resize', 'iframe_id': self.id, 'height':height} elif cmd == 'control-c': req_ok() return {u'name': u'keypress', u'key': 'control-c'} elif cmd == 'control-d': req_ok() return {u'name': u'keypress', u'key': 'control-d'} elif cmd == 'control-z': req_ok() return {u'name': u'keypress', u'key': 'control-z'} elif cmd == 'ping': # this command 
allows the iframe to check whether # its alive when not, Iframes.request will respond # with a 404 notfound upon accessing comm_path req_ok() else: req_bad("Invalid command: %r" % (cmd, )) else: req_bad("Not a dictionary: %r" % (data, ))
# Flask view: render the analytics page for a single keyword.
#
# Flow, taken from the code below:
#   * Resolve the requested period (default "week") via get_period(); the
#     "custom" period is restricted to "deluxe" users, limited to 31 days,
#     and requires start < end -- otherwise a (Dutch) error message is
#     flashed and the user is redirected home.
#   * Fetch processed keyword data through cache(process_details, ...);
#     if a Response object comes back it is returned unchanged.
#   * Trim the payload for display: 16 URLs (each given an 80-char
#     display_url via shorten), 200 tag-cloud entries, 200 tweets /
#     interaction tweets, 11 retweets; graph, polarity, photos, news and
#     num_tweets are pulled out of keyword_data before it is serialised
#     to JSON for the template.
#   * Map the period onto a Google-Trends range and a Dutch period name,
#     format news pubdates, then render keyword.html.
#
# NOTE(review): the body below is preserved verbatim from the
# whitespace-mangled source; the original multi-line nesting cannot be
# reconstructed unambiguously, so no statement has been altered.
def view_keyword(keyword): deluxe = is_deluxe( current_user ) # users in the "deluxe" group can specify their own time period period, start, end, cache_time = get_period(request, "week") if period == "custom": if not deluxe: flash( "Deze functionaliteit is alleen beschikbaar voor goedgekeurde gebruikers.", "error") return redirect(url_for("horti.home")) if (end - start).days > 31: flash("Periode langer dan een maand is niet toegestaan", "error") return redirect(url_for("horti.home")) if start > end: flash("De einddatum moet na de begindatum zijn.", "error") return redirect(url_for("horti.home")) params = { "start": start.strftime(time_format), "end": end.strftime(time_format) } keyword_data = cache(process_details, keyword, params, cache_time=cache_time, path=get_req_path(request)) if isinstance(keyword_data, Response): return keyword_data urls = keyword_data["URLs"][:16] for url in urls: url["display_url"] = shorten(url["link"], 80) del keyword_data["URLs"] keyword_data["tagCloud"] = keyword_data["tagCloud"][:200] photos = enumerate(keyword_data["photos"] ) # number of photo's is limited in processing.py del keyword_data["photos"] display_tweets = 11 max_tweets = 200 keyword_data["tweets"] = keyword_data["tweets"][:max_tweets] keyword_data["retweets"] = keyword_data["retweets"][:display_tweets] keyword_data["interaction_tweets"] = keyword_data[ "interaction_tweets"][:max_tweets] num_tweets = keyword_data["num_tweets"] del keyword_data["num_tweets"] graph = keyword_data["graph"] del keyword_data["graph"] polarity = keyword_data["polarity"] del keyword_data["polarity"] polarity_face = display_polarity(polarity) gtrends_period = { "day": "now 1-d", "week": "now 7-d", "month": "today 1-m" }.get(period, "now 1-d") period_name = { "day": "dag", "week": "week", "month": "maand" }.get(period, "dag") news = [] for item in keyword_data["news"]: item["pubdate"] = display_datetime(item["pubdate"]) del item["nid"] news.append(item) del keyword_data["news"] template_data = { 
"keyword": keyword, "keyword_data": json.dumps(keyword_data), "deluxe": deluxe, "num_tweets": display_number(num_tweets), "urls": urls, "graph": json.dumps(graph), "photos": photos, "display_tweets": display_tweets, "start": display_datetime(start), "end": display_datetime(end), "period": period, "period_name": period_name, "polarity": polarity, "polarity_face": polarity_face, "gtrends_period": gtrends_period, "news": news } return render_template("keyword.html", title=make_title(keyword), **template_data)
line += utils.nol(text, *bi[0]) bi.pop(0) # Define type of the first block, if not given explicitly if firstblock is None: if len(bi) == 1: firstblock = bid.d else: firstblock = bid.t i1, i2 = utils.newlineindex(text, bi[0][0]) bi.insert(0, (bi[0][0], i1)) bi[1] = (i2, bi[1][1]) cb = firstblock while bi: dres[bid[cb]] = bi[0], line line += utils.nol(text, *bi[0]) bi.pop(0) cb += 1 return dres if __name__ == '__main__': from sys import argv txt = open(argv[1], 'r').read() d = get_block_positions(txt) for k, (ii, l) in d.items(): s = txt[slice(*ii)] print k, l, utils.shorten(s)