def get_rss_entries(url, prevtime=None):
    """Fetch RSS/Atom entries from *url* that are newer than *prevtime*.

    Returns a list of (msg, publishtime) tuples, where msg is a dict with
    'title', 'url' and 'content' keys (all normalized through mb_code) and
    publishtime is the entry's published_parsed (falling back to
    updated_parsed) time struct.  Returns [] when the feed cannot be
    fetched or parsed.
    """
    import feedparser
    import sys
    sys.path.insert(0, '..')
    from lib import mb_code, strip_tags
    try:
        d = feedparser.parse(url)
    except Exception:
        # Best-effort: a broken feed yields no entries.  Narrowed from a
        # bare `except:` so SystemExit/KeyboardInterrupt still propagate.
        return []
    statuses = []
    for e in d.entries:
        title = mb_code(e.title)
        href = mb_code(e.links[0]['href'])
        try:
            content = mb_code(e.content[0].value)
            content = strip_tags(content)
        except Exception:
            content = ''  # entry carries no content element
        try:
            publishtime = e.published_parsed
        except AttributeError:
            # feedparser raises AttributeError for missing fields;
            # fall back to the updated time
            publishtime = e.updated_parsed
        msg = {
            'title': title,
            'url': href,
            'content': content,
        }
        if prevtime is None or publishtime > prevtime:
            statuses.append((msg, publishtime))
    return statuses
def say():
    """Handle POST of a new status ("drop a note into the tree hole").

    Requires a logged-in beaker session (access_token) and a valid
    formhash.  The submitted word is whitespace-normalized, length-checked
    (1..140 characters), posted via the shared API, and optionally appended
    to the weiqun queue file.  Returns the template context dict on
    success, or an error string if posting raises.
    """
    s = request.environ.get('beaker.session')
    access_token = s.get('access_token', '')
    if not access_token:
        toggle_notice('还未登录, <a href="/auth"><img src="/static/login240.png" /></a> ')
        redirect('/')
    if not check_formhash():
        abort(403, 'how you get here?')
    # NOTE(review): the return value of get_user_api() was bound to an
    # unused local; the call is kept in case it has side effects (e.g.
    # validating the token) — confirm whether it can be removed entirely.
    lib.get_user_api(access_token)
    word = request.POST.get('word', '')
    word = " ".join(word.split())  # collapse all runs of whitespace
    # length in characters, not bytes: mb_code normalizes the encoding first
    wordlen = len(lib.mb_code(word, 'utf-8').decode('utf-8'))
    toweiqun = request.POST.get('toweiqun', '')
    if wordlen == 0 or wordlen > 140:
        abort(403, 'invalid status')
    try:
        api = lib.get_api()
        api.update_status(word)
        toggle_notice('小纸条已经丢进树洞')
        if toweiqun == 'on':
            lib.appendto(
                lib.get_timed_path('toweiqun'),
                word + "\n",
            )
        return {
            'uid': s.get('uid', ''),
            'name': s.get('name', ''),
            'notice': toggle_notice(),
            'formhash': set_formhash(),
        }
    except Exception as e:  # was `except Exception, e` — py2-only syntax
        return "error: " + str(e)
def get_rss_entries(url, prevtime=None, nhead=0):
    """Fetch RSS/Atom entries from *url*.

    Returns a list of (msg, publishtime) tuples for entries newer than
    *prevtime* (entries with no parseable timestamp always pass).  msg
    holds curated 'title'/'url'/'content'/'entry' keys plus a copy of
    every raw entry field (strings normalized through mb_code).  When
    nhead > 0 the result is truncated to the first nhead matches.
    Returns [] when the feed cannot be fetched or parsed.
    """
    import feedparser
    import sys
    sys.path.insert(0, '..')
    from lib import mb_code, strip_tags
    try:
        d = feedparser.parse(url)
    except Exception:  # narrowed from bare `except:`
        return []
    statuses = []
    for e in d.entries:
        title = mb_code(e.title)
        href = mb_code(e.links[0]['href'])
        try:
            content = mb_code(e.content[0].value)
            content = strip_tags(content)
        except Exception:
            content = ''  # entry carries no content element
        # prefer the published time, fall back to updated, else None
        publishtime = getattr(e, 'published_parsed', getattr(
            e, 'updated_parsed', None,
        ))
        msg = {
            'title': title,
            'url': href,
            'content': content,
            'entry': e,
        }
        # NOTE(review): this copy can clobber the curated keys above —
        # e.g. msg['content'] becomes the raw e['content'] list, undoing
        # strip_tags.  Preserved as-is; confirm whether that is intended.
        for i in e:
            msg[i] = mb_code(e[i]) if isinstance(e[i], basestring) else e[i]
        if prevtime is None or publishtime is None or publishtime > prevtime:
            statuses.append((msg, publishtime))
    if nhead > 0:
        statuses = statuses[:nhead]
    return statuses
def get_rss_entries(url, prevtime=None, nhead=0):
    """Collect entries from the RSS/Atom feed at *url*.

    Each returned item is a (msg, publishtime) tuple; msg carries curated
    'title'/'url'/'content'/'entry' keys plus a copy of every raw entry
    field (string values normalized via mb_code).  Entries are filtered to
    those newer than *prevtime* (entries without a timestamp always pass)
    and truncated to the first *nhead* items when nhead > 0.  An
    unfetchable or unparseable feed yields [].
    """
    import feedparser
    import sys
    sys.path.insert(0, '..')
    from lib import mb_code, strip_tags
    try:
        d = feedparser.parse(url)
    except Exception:  # narrowed from bare `except:`
        return []
    statuses = []
    for e in d.entries:
        title = mb_code(e.title)
        href = mb_code(e.links[0]['href'])
        try:
            content = strip_tags(mb_code(e.content[0].value))
        except Exception:
            content = ''  # no content element on this entry
        publishtime = getattr(
            e, 'published_parsed', getattr(
                e, 'updated_parsed', None,
            )
        )
        msg = {
            'title': title,
            'url': href,
            'content': content,
            'entry': e,
        }
        # NOTE(review): copying raw entry fields can overwrite the curated
        # keys above (e.g. 'content' reverts to the raw list) — kept as-is.
        for key in e:
            msg[key] = mb_code(e[key]) if isinstance(e[key], basestring) else e[key]
        if prevtime is None or publishtime is None or publishtime > prevtime:
            statuses.append((msg, publishtime))
    if nhead > 0:
        statuses = statuses[:nhead]
    return statuses
def get_twitter_status(username, prevtime=None):
    """Fetch *username*'s tweets from the Twitter RSS timeline.

    Returns (status, pubdate) tuples in oldest-first order, limited to
    tweets newer than *prevtime* when it is given.  Each status is
    HTML-entity-decoded, has the leading "username: " prefix stripped,
    and is passed through the unshorten/shurl filters when those are
    enabled.  Returns [] on any fetch or parse failure.
    """
    from lib import mb_code
    from datetime import datetime
    ptime = prevtime if prevtime is not None else False
    url = 'https://api.twitter.com/1/statuses/user_timeline.rss?screen_name=%s' % (
        username)
    try:
        data = get(url).body
    except:
        return []
    from xml.dom import minidom
    try:
        tree = minidom.parseString(data)
    except:
        return []
    # the first <description> belongs to the channel itself — skip it
    descriptions = tree.getElementsByTagName('description')[1:]
    dates = tree.getElementsByTagName('pubDate')
    prefix = '%s: ' % username
    statuses = []
    # walk the feed backwards so results come out oldest-first
    for idx in reversed(range(len(descriptions))):
        try:
            text = mb_code(decodeHtmlentities(descriptions[idx].childNodes[0].data))
        except:
            continue  # malformed item: no text node
        if text.startswith(prefix):
            text = text[len(prefix):]
        raw_date = mb_code(dates[idx].childNodes[0].data)
        when = datetime.strptime(raw_date, '%a, %d %b %Y %H:%M:%S +0000')
        if ptime is not False and when <= ptime:
            continue  # not newer than prevtime
        if unshorten_prefix:
            text = unshortenstatus(text, unshorten_re)
        if use_shurl:
            text = shurl_status(text)
        statuses.append((text, when))
    return statuses
def get_rss_entries(url, prevtime=None):
    """Fetch RSS entries from *url* that are newer than *prevtime*.

    Returns a list of (msg, publishtime) tuples where msg has 'title' and
    'url' keys (normalized through mb_code) and publishtime is the entry's
    updated_parsed time struct.  Returns [] when the feed cannot be
    fetched or parsed.
    """
    import feedparser
    import sys
    # BUG FIX: was `sys.argv.insert(0, '..')` — mutating argv does not
    # extend the import search path, so `from lib import ...` could fail.
    # Every sibling version of this function uses sys.path.
    sys.path.insert(0, '..')
    from lib import mb_code
    try:
        d = feedparser.parse(url)
    except Exception:  # narrowed from bare `except:`
        return []
    statuses = []
    for e in d.entries:
        title = mb_code(e.title)
        href = mb_code(e.links[0]['href'])
        publishtime = e.updated_parsed
        msg = {'title': title, 'url': href}
        if prevtime is None or publishtime > prevtime:
            statuses.append((msg, publishtime))
    return statuses
def get_twitter_status(username, prevtime=None):
    """Fetch *username*'s tweets from the Twitter v1 RSS timeline.

    Returns a list of (status, pubdate) tuples in oldest-first order,
    where pubdate is a datetime (UTC per the feed's +0000 offset) and
    status is the decoded, prefix-stripped tweet text.  Only tweets newer
    than *prevtime* (a datetime) are returned when it is given.  Returns
    [] on any fetch or parse failure.
    """
    from lib import mb_code
    from datetime import datetime
    # False is the "no lower bound" sentinel, distinct from any datetime
    ptime = prevtime if prevtime is not None else False
    url = 'https://api.twitter.com/1/statuses/user_timeline.rss?screen_name=%s' % (username)
    try:
        data = get(url).body
    except:
        return []
    from xml.dom import minidom
    try:
        tree = minidom.parseString(data)
    except:
        return []
    # skip the first <description>: it belongs to the channel, not a tweet
    desc = tree.getElementsByTagName('description')[1:]
    date = tree.getElementsByTagName('pubDate')
    statuses = []
    # iterate in reverse so the returned list is oldest-first
    lst = range(len(desc) - 1, -1, -1)
    # tweets are prefixed "username: " in the RSS text; strip that off
    prefix = '%s: ' % username
    prefix_len = len(prefix)
    for i in lst:
        try:
            status = mb_code(decodeHtmlentities(desc[i].childNodes[0].data))
            if status.startswith(prefix):
                status = status[prefix_len:]
        except:
            continue  # malformed item (e.g. empty description) — skip it
        pubdate = mb_code(date[i].childNodes[0].data)
        # RFC-822 date as emitted by the Twitter RSS feed
        pubdate = datetime.strptime(pubdate, '%a, %d %b %Y %H:%M:%S +0000')
        if ptime is False or pubdate > ptime:
            if unshorten_prefix:
                status = unshortenstatus(status, unshorten_re)
            if use_shurl:
                status = shurl_status(status)
            statuses.append((status, pubdate))
    return statuses
def get_twitter_status(username, prevtime=None):
    """Fetch *username*'s tweets from the legacy Twitter RSS timeline.

    Unlike the newer variants in this file, *prevtime* here is an RFC-822
    date STRING ('%a, %d %b %Y %H:%M:%S +0000'), and the pubdate in each
    returned (status, pubdate) tuple is also left as a string.  Results
    are oldest-first; [] is returned when the fetch yields nothing or the
    XML cannot be parsed.
    """
    import sys
    sys.path.append('..')
    from lib import mb_code
    from datetime import datetime
    if prevtime is not None:
        # parse the string lower bound once, up front
        ptime = datetime.strptime(prevtime, '%a, %d %b %Y %H:%M:%S +0000')
    else:
        ptime = False  # sentinel: no lower bound
    url = 'http://twitter.com/statuses/user_timeline/%s.rss' % username
    data = fetch(url)
    if not data:
        return []
    from xml.dom import minidom
    try:
        tree = minidom.parseString(data)
    except:
        return []
    # skip the first <description>: it belongs to the channel, not a tweet
    desc = tree.getElementsByTagName('description')[1:]
    date = tree.getElementsByTagName('pubDate')
    statuses = []
    # iterate in reverse so the returned list is oldest-first
    lst = range(len(desc)-1, -1, -1)
    # tweets are prefixed "username: " in the RSS text; strip that off
    prefix = '%s: ' % username
    prefix_len = len(prefix)
    for i in lst:
        try:
            status = decodeHtmlentities(mb_code(desc[i].childNodes[0].data))
            if status.startswith(prefix):
                status = status[prefix_len:]
        except:
            continue  # malformed item (e.g. empty description) — skip it
        pubdate = mb_code(date[i].childNodes[0].data)
        # NOTE: the date is re-parsed per item; the appended pubdate stays
        # a string (callers of this variant expect the raw RFC-822 form)
        if ptime is False or datetime.strptime(pubdate , '%a, %d %b %Y %H:%M:%S +0000') > ptime:
            status = unshortenstatus(status, unshorten_re)
            statuses.append((status, pubdate))
    return statuses
token = request.GET.get('oauth_token') verifier = request.GET.get('oauth_verifier') o = lib.get_oauth_handler() o.set_request_token(request_token.key, request_token.secret) try: access_token = o.get_access_token(verifier) except Exception, e: abort(500, 'check access_token failed') o = lib.get_user_api(access_token) try: user = o.verify_credentials() s['uid'] = str(user.id) s['name'] = lib.mb_code(user.name) except: abort(500, 'can not get user info') s['access_token'] = access_token s.save() redirect('/say') @route('/') @view('index') def root(): s = request.environ.get('beaker.session') access_token = s.get('access_token', '') if not access_token: return {