def compute_handle_page_get(request, map):
    """Render a GET request for a regular page.

    Re-fetches the UrlMap row by id, gathers the page's active text ads
    and presentation flags (adsense, stackvm, mobile), and renders the
    'page' template.
    """
    map = session.query(UrlMap).filter_by(url_map_id=map['url_map_id']).first()
    template_data = default_page_template_data(request, map)
    # Only show ads that never expire or have not expired yet.
    # BUG FIX: the original condition was `TextAds.expires<=datetime.utcnow()`,
    # which selected already-expired ads and hid the active ones.
    text_ads = map.page.text_ads.filter(
        or_(TextAds.expires==None, TextAds.expires>=datetime.utcnow())
    ).all()
    adsense = True
    if map.page_id in no_adsense_ids:
        adsense = False
    stackvm = False
    if map.page_id in stackvm_ids:
        stackvm = True
    # The mobile flag is derived from the Referer header, not a User-Agent.
    referer = request.headers.get('Referer', 'None')
    mobile = False
    if mobile_rx.search(referer):
        mobile = True
    return render_template("page",
        text_ads=text_ads,
        stackvm=stackvm,
        adsense=adsense,
        mobile=mobile,
        **template_data)
def validate_spam_comment(name, email, url, comment, ip):
    """Run a submitted comment through the anti-spam checks.

    Checks, in order: the regex spamlists for name/email/url/comment,
    trailing-link patterns, links missing the 'nospam' marker, duplicate
    comments from the last 24 hours, too many links, and a per-IP rate
    limit of 5 comments per hour.  Raises CommentError with a
    user-facing message on the first rule that matches; returns None
    when the comment looks clean.
    """
    msg = """My anti-spam system says your comment looks spammy. I can't post it. If you're a real person and your comment is real, can you please email it to me at <a href="mailto:[email protected]">[email protected]</a>? I'll post your comment then and tune my anti-spam system not to match comments like these in the future. Thanks!"""
    # Spamlist regexes: any match in any of the four fields rejects.
    for r in spamlist_names:
        if r.search(name):
            raise CommentError, msg
    for r in spamlist_emails:
        if r.search(email):
            raise CommentError, msg
    for r in spamlist_urls:
        if r.search(url):
            raise CommentError, msg
    for r in spamlist_comments:
        if r.search(comment):
            raise CommentError, msg
    msg2 = "I am sorry, please don't end your comment with a link. It's a common spam pattern I am seeing on my blog. Please add at least a dot at the end of the comment to avoid it being matched by this spam filter. Thanks!"
    # Comments ending in a closing link tag are a common spam pattern.
    if re.search("</a>$", comment):
        raise CommentError, msg2
    if re.search("</a></strong>$", comment):
        raise CommentError, msg2
    msg3 = """Please add <b>nospam</b> argument to your link. Here's an example:<br> <code><a href="http://digg.com" <b>nospam</b>>digg</a></code><br> This helps to keep spammers out. Thanks!"""
    # Human commenters are asked to add a 'nospam' marker inside each link.
    if re.search("<a href", comment):
        if not re.search("<a href.*?nospam", comment):
            raise CommentError, msg3
    msg4 = """My anti-spam system has seen this comment before. I can't post it."""
    # Reject exact duplicate comment bodies posted within the last 24 hours.
    yesterday = datetime.utcnow() - timedelta(days=1)
    if session.query(Comment).filter(and_(Comment.comment == comment, Comment.timestamp >= yesterday)).count() > 0:
        raise CommentError, msg4
    msg5 = """Your comment contains too many links. My anti-spam system doesn't let you post comments with more than 5 links."""
    if comment.count("href") > 5:
        raise CommentError, msg5
    msg6 = """You're commenting too much. 
You can post at most 5 comments per hour."""
    # Per-IP rate limit: at most 5 comments in the last hour across all
    # visitor rows recorded for this IP address.
    hourago = datetime.utcnow() - timedelta(hours=1)
    visitor_ids = (visitor.visitor_id for visitor in session.query(Visitor).filter_by(ip=ip).all())
    print visitor_ids  # NOTE(review): prints the generator object itself, not the ids -- debug leftover? confirm
    if session.query(Comment).filter(and_(Comment.visitor_id.in_(visitor_ids), Comment.timestamp >= hourago)).count() > 5:
        raise CommentError, msg6
def compute_main(request, seo_name):
    """Render the tag page listing all blog posts carrying this tag."""
    # TODO: perhaps this query is not necessary
    matching_tag = session.query(Tag).filter_by(seo_name=seo_name).first()
    if matching_tag is None:
        raise NotFound()
    tagged_pages = matching_tag.blog_pages.order_by(BlogPage.publish_date.desc()).all()
    return render_template('tag',
        tag=matching_tag,
        pages=tagged_pages,
        number_to_us=number_to_us)
def download_hits_handler(lexer, match):
    """Token handler that emits the hit counter for a download.

    *match* group 1 is the download id.  If no Download row exists for
    it, the lexer's download_error token stream is yielded instead.
    Yields (index, token, value) triples with index fixed at 0.
    """
    download_id = match.group(1)
    download = session.query(Download).filter_by(download_id=download_id).first()
    if not download:
        token_stream = lexer.download_error(download_id)
    else:
        # NOTE(review): download.downloads is emitted as the token value
        # as-is; if the column is an integer the consumer presumably
        # coerces it to a string -- confirm, or wrap it in str().
        token_stream = [(Token.Text, download.downloads)]
    for token, value in token_stream:
        yield 0, token, value
def compute_main(request, seo_name):
    """Render the category page listing the category's blog posts."""
    found_category = session.query(Category).filter_by(seo_name=seo_name).first()
    if found_category is None:
        raise NotFound()
    category_pages = found_category.blog_pages.order_by(BlogPage.publish_date.desc()).all()
    return render_template('category',
        category=found_category,
        pages=category_pages,
        number_to_us=number_to_us)
def find_url_map_compute(request_path):
    """Look up the UrlMap row for *request_path*.

    Returns a plain dict with its id, path and page id, or None when no
    mapping exists.
    """
    row = session.query(UrlMap).filter_by(request_path=request_path).first()
    if row is None:
        return None
    return dict(
        url_map_id=row.url_map_id,
        request_path=row.request_path,
        page_id=row.page_id,
    )
def add_tag(self, tag):
    """Attach *tag* to this object, reusing an existing Tag row when one
    with the same seo_name already exists.

    The count column is assigned the SQL expression Tag.count + 1 so the
    increment is performed by the database, not in Python.
    """
    real_tag = tag
    t = session.query(Tag).filter_by(seo_name=tag.seo_name).first()
    if t:
        # A tag with this seo_name already exists -- bump its usage count.
        real_tag = t
        real_tag.count = Tag.count + 1
    else:
        # Brand new tag -- this is its first use.
        real_tag.count = 1
    self.tags.append(real_tag)
    self.save()
def delete_tag(self, tag_name):
    """Detach the tag named *tag_name* from this object.

    If this was the tag's last use the Tag row is deleted entirely;
    otherwise its usage count is decremented via the SQL expression
    Tag.count - 1 so the update is performed by the database.
    """
    tag = session.query(Tag).filter_by(name=tag_name).first()
    # NOTE(review): when no Tag row matches, `tag` is None and the
    # membership test below is simply False, so nothing happens.
    if tag in self.tags:
        self.tags.remove(tag)
        self.save()
        if tag.count == 1:
            session.delete(tag)
        else:
            tag.count = Tag.count - 1
        session.commit()
def handle_page_post(request, map):
    """Dispatch a POST made to a page.

    Currently POST can only originate from a comment being submitted:
    either a final submit or a preview.  Anything else is a 404.
    """
    map = session.query(UrlMap).filter_by(url_map_id=map['url_map_id']).first()
    form = request.form
    if form.get('submit') is not None:
        return handle_comment_submit(request, map)
    if form.get('preview') is not None:
        return handle_comment_preview(request, map)
    raise NotFound()
def get_mixergy(page=1):
    """Return one page worth of (Page, UrlMap) tuples for visible blog
    posts, newest first.

    *page* is 1-based; the page size comes from config.posts_per_page.
    """
    # TODO: narrow down the query
    return (
        session.query(Page, UrlMap)
        .join(BlogPage, UrlMap)
        .order_by(BlogPage.publish_date.desc())
        .filter(BlogPage.visible == True)
        .limit(config.posts_per_page)
        .offset((page - 1) * config.posts_per_page)
        .all()
    )
def main(request, filename):
    """Serve the download *filename* and record the download hit.

    Raises NotFound when there is no Download row for the filename or
    the file itself is missing on disk.
    """
    download = session.query(Download).filter_by(filename=filename).first()
    if not download:
        # TODO: 'download you were looking for was not found, check out these downloads...'
        raise NotFound()
    try:
        # Renamed from `file` -- the old name shadowed the builtin.
        # The handle is deliberately left open: wrap_file hands it to the
        # WSGI file wrapper, which closes it after the response is sent.
        download_file = open("%s/%s" % (config['download_path'], filename))
    except IOError:
        # TODO: 'the file was not found, check this out'
        raise NotFound()
    download.another_download(request)
    return Response(
        wrap_file(request.environ, download_file),
        mimetype=download.mimetype,
        direct_passthrough=True
    )
def download_handler(lexer, match):
    """Token handler that emits an HTML download link for a download id.

    *match* group 1 is the download id; unknown ids produce the lexer's
    download_error token stream.  Yields (index, token, value) triples
    with index fixed at 0.
    """
    download_id = match.group(1)
    download = session.query(Download).filter_by(download_id=download_id).first()
    if not download:
        token_stream = lexer.download_error(download_id)
    else:
        # BUG FIX: the title attribute previously used nested raw double
        # quotes (title="Download "%s""), which is invalid HTML and cut the
        # attribute short.  Inner quotes are now emitted as &quot;.
        token_stream = [
            (Token.Tag.InlineTag,
                '<a href="/download/%s" title="Download &quot;%s&quot;">' % \
                (download.filename, download.title)),
            (Token.Text, download.title),
            (Token.Tag.Close, "</a>")
        ]
    for token, value in token_stream:
        yield 0, token, value
def compute_stackvm_get_page(request, map):
    """Render a GET request for a StackVM page (stackvm always enabled).

    Passes through the signup/error query args so the template can show
    signup state.
    """
    map = session.query(UrlMap).filter_by(url_map_id=map['url_map_id']).first()
    template_data = default_page_template_data(request, map)
    # Only show ads that never expire or have not expired yet.
    # BUG FIX: the original condition was `TextAds.expires<=datetime.utcnow()`,
    # which selected already-expired ads and hid the active ones.
    text_ads = map.page.text_ads.filter(
        or_(TextAds.expires==None, TextAds.expires>=datetime.utcnow())
    ).all()
    # The mobile flag is derived from the Referer header, not a User-Agent.
    referer = request.headers.get('Referer', 'None')
    mobile = False
    if mobile_rx.search(referer):
        mobile = True
    return display_page(
        text_ads=text_ads,
        mobile=mobile,
        stackvm=True,
        stackvm_signup=request.args.get('signup'),
        stackvm_signup_error=request.args.get('error'),
        **template_data
    )
def default_comment_template_data(request, comment_id):
    """Build the common template data for rendering a single comment page."""
    result = session.query(Comment, Page, UrlMap).join(Page, UrlMap).filter(Comment.comment_id == comment_id).first()
    if result is None:
        # TODO: "The requested comment was not found, here are a few latest coments"
        # "Here are latest posts, here are most commented posts..."
        raise NotFound()
    comment, page, urlmap = result
    return {
        "page": page,
        "page_path": urlmap.request_path,
        "comment_submit_path": "/c/%d?reply" % comment_id,
        "comment_parent_id": comment_id,
        "comment": comment,
        "form": request.form,
        "lynx": lynx_browser(request),
    }
def get_comment(id):
    """Fetch a single Comment by primary key, or None when absent."""
    comment_query = session.query(Comment).filter_by(comment_id=id)
    return comment_query.first()
def find_redirect_compute(request_path):
    """Return the Redirect row whose old_path equals *request_path*, or None."""
    redirect_query = session.query(Redirect).filter_by(old_path=request_path)
    return redirect_query.first()
import sys sys.path.append('.') from catonmat.models import Comment from catonmat.database import session from catonmat.comment_spamlist import spamlist_names, spamlist_urls, spamlist_emails, spamlist_comments comments = session.query(Comment).all() for c in comments: for r in spamlist_names: if r.search(c.name): print "Comment %d matches name %s" % (c.comment_id, c.name.encode('utf8')) for r in spamlist_emails: if r.search(c.email): print "Comment %d matches email %s" % (c.comment_id, c.email) for r in spamlist_urls: if r.search(c.website): print "Comment %d matches website %s" % (c.comment_id, c.website) for r in spamlist_comments: if r.search(c.comment): print "Comment %d matches comment %s..." % (c.comment_id, c.comment[0:50])
def compute_list(request):
    """Render the page listing every tag, ordered alphabetically by name."""
    all_tags = session.query(Tag).order_by(Tag.name).all()
    return render_template('tag_list', tags=all_tags)
def comment_count(self):
    """Number of comments attached to this page."""
    page_comments = session.query(Comment).filter_by(page_id=self.page_id)
    return page_comments.count()
def validate_parent_id(parent_id): if parent_id: comments = session.query(Comment).filter_by(comment_id=parent_id).count() if comments != 1: raise CommentError, "Something went wrong, the comment you were responding to was not found..."
def total_blogpages():
    """Count of blog pages marked visible."""
    visible_pages = session.query(BlogPage).filter(BlogPage.visible == True)
    return visible_pages.count()
def invalidate_page_cache(page_id):
    """Drop the cached rendering of the page with *page_id*.

    No-op when caching is disabled or no Page row matches.
    """
    if config.use_cache:
        page = session.query(Page).filter_by(page_id=page_id).first()
        # BUG FIX: .first() returns None for an unknown/stale page_id and
        # the original crashed on page.request_path; cache invalidation is
        # best-effort, so just skip in that case.
        if page is not None:
            cache_del('individual_page_%s' % page.request_path)
def validate_page_id(page_id): number_of_pages = session.query(Page).filter_by(page_id=page_id).count() if number_of_pages != 1: raise CommentError, "Something went wrong, the page you were commenting on was not found..."
def compute_page_list():
    """Render the full list of visible blog posts, newest first."""
    visible_posts = (
        session.query(Page)
        .join(BlogPage)
        .order_by(BlogPage.publish_date.desc())
        .filter(BlogPage.visible == True)
        .all()
    )
    pagination = Pagination(1, total_blogpages(), config.posts_per_page)
    return render_template("page_list", posts=visible_posts, pagination=pagination)