def listing(self):
    self.things, prev, next, bcount, acount = self.get_items()
    self._max_num = max(acount, bcount)
    self.after = None
    self.before = None

    if self.nextprev and self.prev_link and prev and bcount > 1:
        p = request.get.copy()
        p.update({'after': None, 'before': prev._fullname, 'count': bcount})
        self.before = prev._fullname
        self.prev = (request.path + utils.query_string(p))
        p_first = request.get.copy()
        p_first.update({'after': None, 'before': None, 'count': None})
        self.first = (request.path + utils.query_string(p_first))
    if self.nextprev and self.next_link and next:
        p = request.get.copy()
        p.update({'after': next._fullname, 'before': None, 'count': acount})
        self.after = next._fullname
        self.next = (request.path + utils.query_string(p))
    #TODO: need name for template -- must be better way
    return Wrapped(self)

def listing(self, next_suggestions=None):
    self.things, prev, next, bcount, acount = self.get_items()
    self.next_suggestions = next_suggestions
    self._max_num = max(acount, bcount)
    self.after = None
    self.before = None

    if self.nextprev and self.prev_link and prev and bcount > 1:
        p = self.params.copy()
        p.update({'after': None, 'before': prev._fullname, 'count': bcount})
        self.before = prev._fullname
        self.prev = (request.path + utils.query_string(p))
        p_first = self.params.copy()
        p_first.update({'after': None, 'before': None, 'count': None})
        self.first = (request.path + utils.query_string(p_first))
    if self.nextprev and self.next_link and next:
        p = self.params.copy()
        p.update({'after': next._fullname, 'before': None, 'count': acount})
        self.after = next._fullname
        self.next = (request.path + utils.query_string(p))

    for count, thing in enumerate(self.things):
        thing.rowstyle_cls = getattr(thing, 'rowstyle_cls', "")
        thing.rowstyle_cls += ' ' + ('even' if (count % 2) else 'odd')
        thing.rowstyle = CachedVariable("rowstyle")

    #TODO: need name for template -- must be better way
    return Wrapped(self)

def GET_search(self, query, num, reverse, after, count, sort):
    """Search links page."""
    if query and '.' in query:
        url = sanitize_url(query, require_scheme=True)
        if url:
            return self.redirect("/submit" + query_string({'url': url}))

    q = IndextankQuery(query, c.site, sort)

    num, t, spane = self._search(q, num=num, after=after,
                                 reverse=reverse, count=count)

    if not isinstance(c.site, FakeSubreddit) and not c.cname:
        all_reddits_link = "%s/search%s" % (subreddit.All.path,
                                            query_string({'q': query}))
        d = {'reddit_name': c.site.name,
             'reddit_link': "http://%s/" % get_domain(cname=c.cname),
             'all_reddits_link': all_reddits_link}
        infotext = strings.searching_a_reddit % d
    else:
        infotext = None

    res = SearchPage(_('search results'), query, t, num,
                     content=spane,
                     nav_menus=[SearchSortMenu(default=sort)],
                     search_params=dict(sort=sort),
                     infotext=infotext).render()

    return res

def listing(self, next_suggestions=None):
    self.things, prev, next, bcount, acount = self.get_items()
    self.next_suggestions = next_suggestions
    self._max_num = max(acount, bcount)
    self.after = None
    self.before = None

    if self.nextprev and self.prev_link and prev and bcount > 1:
        p = request.GET.copy()
        p.update({"after": None, "before": prev._fullname, "count": bcount})
        self.before = prev._fullname
        self.prev = request.path + utils.query_string(p)
        p_first = request.GET.copy()
        p_first.update({"after": None, "before": None, "count": None})
        self.first = request.path + utils.query_string(p_first)
    if self.nextprev and self.next_link and next:
        p = request.GET.copy()
        p.update({"after": next._fullname, "before": None, "count": acount})
        self.after = next._fullname
        self.next = request.path + utils.query_string(p)

    for count, thing in enumerate(self.things):
        thing.rowstyle = getattr(thing, "rowstyle", "")
        thing.rowstyle += " " + ("even" if (count % 2) else "odd")

    # TODO: need name for template -- must be better way
    return Wrapped(self)

def listing(self, next_suggestions=None):
    self.things, prev, next, bcount, acount = self.get_items()
    self.next_suggestions = next_suggestions
    self._max_num = max(acount, bcount)
    self.after = None
    self.before = None

    if self.nextprev and self.prev_link and prev and bcount > 1:
        p = self.params.copy()
        p.update({"after": None, "before": prev._fullname, "count": bcount})
        self.before = prev._fullname
        self.prev = request.path + utils.query_string(p)
        p_first = self.params.copy()
        p_first.update({"after": None, "before": None, "count": None})
        self.first = request.path + utils.query_string(p_first)
    if self.nextprev and self.next_link and next:
        p = self.params.copy()
        p.update({"after": next._fullname, "before": None, "count": acount})
        self.after = next._fullname
        self.next = request.path + utils.query_string(p)

    for count, thing in enumerate(self.things):
        thing.rowstyle_cls = getattr(thing, "rowstyle_cls", "")
        thing.rowstyle_cls += " " + ("even" if (count % 2) else "odd")
        thing.rowstyle = CachedVariable("rowstyle")

    survey_action = c.cookies.get("survey_action")
    if (feature.is_enabled("show_survey") and not survey_action and
            g.live_config["survey_info"]):
        self.survey = ast.literal_eval(g.live_config["survey_info"])
        self.show_survey = True

    # TODO: need name for template -- must be better way
    return Wrapped(self)

def listing(self, next_suggestions=None):
    self.things, prev, next, bcount, acount = self.get_items()
    self.next_suggestions = next_suggestions
    self._max_num = max(acount, bcount)
    self.after = None
    self.before = None

    if self.nextprev and self.prev_link and prev and bcount > 1:
        p = request.GET.copy()
        p.update({'after': None, 'before': prev._fullname, 'count': bcount})
        self.before = prev._fullname
        self.prev = (request.path + utils.query_string(p))
        p_first = request.GET.copy()
        p_first.update({'after': None, 'before': None, 'count': None})
        self.first = (request.path + utils.query_string(p_first))
    if self.nextprev and self.next_link and next:
        p = request.GET.copy()
        p.update({'after': next._fullname, 'before': None, 'count': acount})
        self.after = next._fullname
        self.next = (request.path + utils.query_string(p))

    for count, thing in enumerate(self.things):
        thing.rowstyle = getattr(thing, 'rowstyle', "")
        thing.rowstyle += ' ' + ('even' if (count % 2) else 'odd')

    #TODO: need name for template -- must be better way
    return Wrapped(self)

def run(self, password=None):
    if not c.user_is_loggedin:
        #TODO return a real error page
        d = dict(dest=reddit_link(request.path, url=True) +
                 utils.query_string(request.GET))
        return redirect_to("/login" + utils.query_string(d))
    if (password is not None) and not valid_password(c.user, password):
        c.errors.add(errors.WRONG_PASSWORD)

def GET_search(self, query, num, time, reverse, after, count, langs, sort):
    """Search links page."""
    if query and '.' in query:
        url = sanitize_url(query, require_scheme=True)
        if url:
            return self.redirect("/submit" + query_string({'url': url}))

    if langs and self.verify_langs_regex.match(langs):
        langs = langs.split(',')
    else:
        langs = c.content_langs

    subreddits = None
    authors = None
    if c.site == subreddit.Friends and c.user_is_loggedin and c.user.friends:
        authors = c.user.friends
    elif isinstance(c.site, MultiReddit):
        subreddits = c.site.sr_ids
    elif not isinstance(c.site, FakeSubreddit):
        subreddits = [c.site._id]

    q = LinkSearchQuery(q=query, timerange=time, langs=langs,
                        subreddits=subreddits, authors=authors,
                        sort=SearchSortMenu.operator(sort))

    num, t, spane = self._search(q, num=num, after=after,
                                 reverse=reverse, count=count)

    if not isinstance(c.site, FakeSubreddit) and not c.cname:
        all_reddits_link = "%s/search%s" % (subreddit.All.path,
                                            query_string({'q': query}))
        d = {'reddit_name': c.site.name,
             'reddit_link': "http://%s/" % get_domain(cname=c.cname),
             'all_reddits_link': all_reddits_link}
        infotext = strings.searching_a_reddit % d
    else:
        infotext = None

    res = SearchPage(_('search results'), query, t, num,
                     content=spane,
                     nav_menus=[TimeMenu(default=time),
                                SearchSortMenu(default=sort)],
                     search_params=dict(sort=sort, t=time),
                     infotext=infotext).render()

    return res

def GET_search(self, query, num, time, reverse, after, count, langs, sort):
    """Search links page."""
    if query and '.' in query:
        url = sanitize_url(query, require_scheme=True)
        if url:
            return self.redirect("/submit" + query_string({'url': url}))

    if langs and self.verify_langs_regex.match(langs):
        langs = langs.split(',')
    else:
        langs = c.content_langs

    subreddits = None
    authors = None
    if c.site == subreddit.Friends and c.user_is_loggedin and c.user.friends:
        authors = c.user.friends
    elif isinstance(c.site, MultiReddit):
        subreddits = c.site.sr_ids
    elif not isinstance(c.site, FakeSubreddit):
        subreddits = [c.site._id]

    q = LinkSearchQuery(q=query, timerange=time, langs=langs,
                        subreddits=subreddits, authors=authors,
                        sort=SearchSortMenu.operator(sort))

    num, t, spane = self._search(q, num=num, after=after,
                                 reverse=reverse, count=count)

    if not isinstance(c.site, FakeSubreddit) and not c.cname:
        all_reddits_link = "%s/search%s" % (subreddit.All.path,
                                            query_string({'q': query}))
        d = {'reddit_name': c.site.name,
             'reddit_link': "http://%s/" % get_domain(cname=c.cname),
             'all_reddits_link': all_reddits_link}
        infotext = strings.searching_a_reddit % d
    else:
        infotext = None

    res = SearchPage(_('search results'), query, t, num,
                     content=spane,
                     nav_menus=[TimeMenu(default=time),
                                SearchSortMenu(default=sort)],
                     search_params=dict(sort=sort, t=time),
                     infotext=infotext).render()

    return res

def listing(self):
    self.things, prev, next, bcount, acount = self.get_items()
    self.max_num = max(acount, bcount)

    if self.nextprev and self.prev_link and prev and bcount > 1:
        p = request.get.copy()
        p.update({'after': None, 'before': prev._fullname, 'count': bcount})
        self.prev = (request.path + utils.query_string(p))
    if self.nextprev and self.next_link and next:
        p = request.get.copy()
        p.update({'after': next._fullname, 'before': None, 'count': acount})
        self.next = (request.path + utils.query_string(p))
    #TODO: need name for template -- must be better way
    return Wrapped(self)

def GET_s(self, urloid):
    """/s/http://..., show a given URL with the toolbar. if it's
       submitted, redirect to /tb/$id36"""
    force_html()
    path = demangle_url(request.fullpath)

    if not path:
        # it was malformed
        self.abort404()

    # if the domain is shame-banned, bail out.
    if is_shamed_domain(path)[0]:
        self.abort404()

    listing = hot_links_by_url_listing(path, sr=c.site, num=1)
    link = listing.things[0] if listing.things else None

    if link:
        # we were able to find it, let's send them to the
        # toolbar (if enabled) or comments (if not)
        return self.redirect(add_sr("/tb/" + link._id36))
    else:
        # It hasn't been submitted yet. Give them a chance to submit it.
        qs = utils.query_string({"url": path})
        return self.redirect(add_sr("/submit" + qs))

def GET_search(self, query, num, reverse, after, count, sort, restrict_sr):
    """Search links page."""
    if query and '.' in query:
        url = sanitize_url(query, require_scheme=True)
        if url:
            return self.redirect("/submit" + query_string({'url': url}))

    if not restrict_sr:
        site = DefaultSR()
    else:
        site = c.site

    try:
        q = IndextankQuery(query, site, sort)

        num, t, spane = self._search(q, num=num, after=after,
                                     reverse=reverse, count=count)

        res = SearchPage(_('search results'), query, t, num,
                         content=spane,
                         nav_menus=[SearchSortMenu(default=sort)],
                         search_params=dict(sort=sort),
                         simple=False, site=c.site,
                         restrict_sr=restrict_sr).render()

        return res
    except (IndextankException, socket.error), e:
        return self.search_fail(e)

def GET_search(self, query, num, reverse, after, count, sort, restrict_sr):
    """Search links page."""
    if query and '.' in query:
        url = sanitize_url(query, require_scheme=True)
        if url:
            return self.redirect("/submit" + query_string({'url': url}))

    if not restrict_sr:
        site = DefaultSR()
    else:
        site = c.site

    try:
        q = IndextankQuery(query, site, sort)

        num, t, spane = self._search(q, num=num, after=after,
                                     reverse=reverse, count=count)

        res = SearchPage(_('search results'), query, t, num,
                         content=spane,
                         nav_menus=[SearchSortMenu(default=sort)],
                         search_params=dict(sort=sort),
                         simple=False, site=c.site,
                         restrict_sr=restrict_sr).render()

        return res
    except (IndextankException, socket.error), e:
        return self.search_fail(e)

def GET_oldinfo(self, article, type, dest, rest=None, comment=''):
    """Legacy: supporting permalink pages from '06,
       and non-search-engine-friendly links"""
    if not (dest in ('comments', 'related', 'details')):
        dest = 'comments'
    if type == 'ancient':
        #this could go in config, but it should never change
        max_link_id = 10000000
        new_id = max_link_id - int(article._id)
        return self.redirect('/info/' + to36(new_id) + '/' + rest)
    if type == 'old':
        new_url = "/%s/%s/%s" % \
                  (dest, article._id36,
                   quote_plus(title_to_url(article.title).encode('utf-8')))
        if not c.default_sr:
            new_url = "/r/%s%s" % (c.site.name, new_url)
        if comment:
            new_url = new_url + "/%s" % comment._id36
        if c.extension:
            new_url = new_url + "/.%s" % c.extension

        new_url = new_url + query_string(request.get)

        # redirect should be smarter and handle extensions, etc.
        return self.redirect(new_url, code=301)

def GET_button_content(self, url, title, css, vote, newwindow, width, link):

    # no buttons on domain listings
    if isinstance(c.site, DomainSR):
        c.site = Default
        return self.redirect(request.path + query_string(request.GET))

    #disable css hack
    if (css != 'http://blog.wired.com/css/diggsocial.css' and
            css != 'http://www.wired.com/css/diggsocial.css'):
        css = None

    if link:
        url = link.url

    wrapper = make_wrapper(Button if vote else ButtonNoBody,
                           url=url,
                           target="_new" if newwindow else "_parent",
                           title=title, vote=vote, bgcolor=c.bgcolor,
                           width=width, css=css,
                           button=self.buttontype())
    l = self.get_wrapped_link(url, link, wrapper)
    res = l.render()
    c.response.content = spaceCompress(res)
    return c.response

def _sign_url(self, url, token):
    """Sign a url for imgix's secured sources.

    Based very heavily on the example code in the docs:
        http://www.imgix.com/docs/tutorials/securing-images

    Arguments:

    * url -- a UrlParser instance of the url to sign.  This object may
             be modified by the function, so make a copy beforehand if
             that is a concern.
    * token -- a string token provided by imgix for request signing

    Returns a UrlParser instance with signing parameters.
    """
    # Build the signing value
    signvalue = token + url.path
    if url.query_dict:
        signvalue += query_string(url.query_dict)

    # Calculate MD5 of the signing value.
    signature = hashlib.md5(signvalue).hexdigest()

    url.update_query(s=signature)
    return url

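# Illustrative sketch only (not part of the original source): what the imgix
# signing scheme above computes, spelled out with a hypothetical token, path,
# and query.  _sign_url() concatenates token + path + "?<query>", takes the
# MD5 hex digest, and appends it as the "s" parameter.
import hashlib
import urllib

example_token = "SECRET_TOKEN"          # hypothetical imgix signing token
example_path = "/photos/kitten.jpg"     # path portion of the image URL
example_query = [("w", "400")]          # query parameters, if any

signvalue = example_token + example_path
if example_query:
    signvalue += "?" + urllib.urlencode(example_query)
signature = hashlib.md5(signvalue).hexdigest()
signed_url = "https://example.imgix.net%s?%s&s=%s" % (
    example_path, urllib.urlencode(example_query), signature)
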
def GET_oldinfo(self, article, type, dest, rest=None, comment=''):
    """Legacy: supporting permalink pages from '06,
       and non-search-engine-friendly links"""
    if not (dest in ('comments', 'related', 'details')):
        dest = 'comments'
    if type == 'ancient':
        #this could go in config, but it should never change
        max_link_id = 10000000
        new_id = max_link_id - int(article._id)
        return self.redirect('/info/' + to36(new_id) + '/' + rest)
    if type == 'old':
        new_url = "/%s/%s/%s" % \
                  (dest, article._id36,
                   quote_plus(title_to_url(article.title).encode('utf-8')))
        if not c.default_sr:
            new_url = "/r/%s%s" % (c.site.name, new_url)
        if comment:
            new_url = new_url + "/%s" % comment._id36
        if c.extension:
            new_url = new_url + "/.%s" % c.extension

        new_url = new_url + query_string(request.get)

        # redirect should be smarter and handle extensions, etc.
        return self.redirect(new_url, code=301)

def GET_search(self, query, num, reverse, after, count, sort, restrict_sr):
    """Search links page."""
    if query and "." in query:
        url = sanitize_url(query, require_scheme=True)
        if url:
            return self.redirect("/submit" + query_string({"url": url}))

    if not restrict_sr:
        site = DefaultSR()
    else:
        site = c.site

    try:
        cleanup_message = None
        try:
            q = IndextankQuery(query, site, sort)
            if query:
                query = query.replace("proddit:", "reddit:")
                q = IndextankQuery(query, site, sort)
            num, t, spane = self._search(q, num=num, after=after,
                                         reverse=reverse, count=count)
        except InvalidIndextankQuery:
            # strip the query down to a whitelist
            cleaned = re.sub("[^\w\s]+", "", query)
            cleaned = cleaned.lower()

            # if it was nothing but mess, we have to stop
            if not cleaned.strip():
                num, t, spane = 0, 0, []
                cleanup_message = strings.completely_invalid_search_query
            else:
                q = IndextankQuery(cleaned, site, sort)
                num, t, spane = self._search(q, num=num, after=after,
                                             reverse=reverse, count=count)
                cleanup_message = strings.invalid_search_query % {
                                      "clean_query": cleaned}
                cleanup_message += " "
                cleanup_message += strings.search_help % {
                                      "search_help": self.search_help_page}

        if query:
            query = query.replace("reddit:", "proddit:")

        res = SearchPage(_("search results"), query, t, num,
                         content=spane,
                         nav_menus=[SearchSortMenu(default=sort)],
                         search_params=dict(sort=sort),
                         infotext=cleanup_message,
                         simple=False, site=c.site,
                         restrict_sr=restrict_sr,
                         ).render()

        return res
    except (IndextankException, socket.error), e:
        return self.search_fail(e)

def GET_search(self, query, num, reverse, after, count, sort, restrict_sr,
               syntax):
    """Search links page."""
    if query and "." in query:
        url = sanitize_url(query, require_scheme=True)
        if url:
            return self.redirect("/submit" + query_string({"url": url}))

    if not restrict_sr:
        site = DefaultSR()
    else:
        site = c.site

    if not syntax:
        syntax = SearchQuery.default_syntax

    try:
        cleanup_message = None
        try:
            q = SearchQuery(query, site, sort, syntax=syntax)
            results, etime, spane = self._search(q, num=num, after=after,
                                                 reverse=reverse, count=count)
        except InvalidQuery:
            # Clean the search of characters that might be causing the
            # InvalidQuery exception. If the cleaned search boils down
            # to an empty string, the search code is expected to bail
            # out early with an empty result set.
            cleaned = re.sub("[^\w\s]+", " ", query)
            cleaned = cleaned.lower().strip()

            q = SearchQuery(cleaned, site, sort)
            results, etime, spane = self._search(q, num=num, after=after,
                                                 reverse=reverse, count=count)

            if cleaned:
                cleanup_message = strings.invalid_search_query % {
                                      "clean_query": cleaned}
                cleanup_message += " "
                cleanup_message += strings.search_help % {
                                      "search_help": self.search_help_page}
            else:
                cleanup_message = strings.completely_invalid_search_query

        res = SearchPage(_("search results"), query, etime, results.hits,
                         content=spane,
                         nav_menus=[SearchSortMenu(default=sort)],
                         search_params=dict(sort=sort),
                         infotext=cleanup_message,
                         simple=False, site=c.site,
                         restrict_sr=restrict_sr,
                         syntax=syntax,
                         converted_data=q.converted_data,
                         facets=results.subreddit_facets,
                         sort=sort,
                         ).render()

        return res
    except SearchException + (socket.error,) as e:
        return self.search_fail(e)

def GET_search(self, query, num, reverse, after, count, sort, restrict_sr,
               syntax):
    """Search links page."""
    if query and '.' in query:
        url = sanitize_url(query, require_scheme=True)
        if url:
            return self.redirect("/submit" + query_string({'url': url}))

    if not restrict_sr:
        site = DefaultSR()
    else:
        site = c.site

    if not syntax:
        syntax = SearchQuery.default_syntax

    try:
        cleanup_message = None
        try:
            q = SearchQuery(query, site, sort, syntax=syntax)
            num, t, spane = self._search(q, num=num, after=after,
                                         reverse=reverse, count=count)
        except InvalidQuery:
            # strip the query down to a whitelist
            cleaned = re.sub("[^\w\s]+", " ", query)
            cleaned = cleaned.lower()

            # if it was nothing but mess, we have to stop
            if not cleaned.strip():
                num, t, spane = 0, 0, []
                cleanup_message = strings.completely_invalid_search_query
            else:
                q = SearchQuery(cleaned, site, sort)
                num, t, spane = self._search(q, num=num, after=after,
                                             reverse=reverse, count=count)
                cleanup_message = strings.invalid_search_query % {
                                      "clean_query": cleaned}
                cleanup_message += " "
                cleanup_message += strings.search_help % {
                                      "search_help": self.search_help_page}

        res = SearchPage(_('search results'), query, t, num,
                         content=spane,
                         nav_menus=[SearchSortMenu(default=sort)],
                         search_params=dict(sort=sort),
                         infotext=cleanup_message,
                         simple=False, site=c.site,
                         restrict_sr=restrict_sr,
                         syntax=syntax,
                         converted_data=q.converted_data
                         ).render()

        return res
    except SearchException + (socket.error,) as e:
        return self.search_fail(e)

def build(self, base_path=''):
    params = dict(request.GET)
    if self.dest:
        params[self.query_param] = self.dest
    elif self.query_param in params:
        del params[self.query_param]

    self.base_path = base_path
    base_path += query_string(params)

    self.path = base_path.replace('//', '/')

def listing(self, next_suggestions=None):
    self.things, prev, next, bcount, acount = self.get_items()
    self.next_suggestions = next_suggestions
    self._max_num = max(acount, bcount)
    self.after = None
    self.before = None

    if self.nextprev and self.prev_link and prev and bcount > 1:
        p = self.params.copy()
        p.update({'after': None, 'before': prev._fullname, 'count': bcount})
        self.before = prev._fullname
        self.prev = (request.path + utils.query_string(p))
        p_first = self.params.copy()
        p_first.update({'after': None, 'before': None, 'count': None})
        self.first = (request.path + utils.query_string(p_first))
    if self.nextprev and self.next_link and next:
        p = self.params.copy()
        p.update({'after': next._fullname, 'before': None, 'count': acount})
        self.after = next._fullname
        self.next = (request.path + utils.query_string(p))

    for count, thing in enumerate(self.things):
        thing.rowstyle_cls = getattr(thing, 'rowstyle_cls', "")
        thing.rowstyle_cls += ' ' + ('even' if (count % 2) else 'odd')
        thing.rowstyle = CachedVariable("rowstyle")

    survey_action = c.cookies.get('survey_action')
    if (feature.is_enabled('show_survey') and not survey_action and
            g.live_config['survey_info']):
        self.survey = ast.literal_eval(g.live_config['survey_info'])
        self.show_survey = True

    #TODO: need name for template -- must be better way
    return Wrapped(self)

def GET_search(self, query, num, reverse, after, count, sort, restrict_sr):
    """Search links page."""
    if query and '.' in query:
        url = sanitize_url(query, require_scheme=True)
        if url:
            return self.redirect("/submit" + query_string({'url': url}))

    if not restrict_sr:
        site = DefaultSR()
    else:
        site = c.site

    try:
        cleanup_message = None
        try:
            q = IndextankQuery(query, site, sort)
            num, t, spane = self._search(q, num=num, after=after,
                                         reverse=reverse, count=count)
        except InvalidIndextankQuery:
            # delete special characters from the query and run again
            special_characters = '+-&|!(){}[]^"~*?:\\'
            translation = dict((ord(char), None)
                               for char in list(special_characters))
            cleaned = query.translate(translation)

            q = IndextankQuery(cleaned, site, sort)
            num, t, spane = self._search(q, num=num, after=after,
                                         reverse=reverse, count=count)
            cleanup_message = _('I couldn\'t understand your query, ' +
                                'so I simplified it and searched for ' +
                                '"%(clean_query)s" instead.') % {
                                    'clean_query': cleaned}

        res = SearchPage(_('search results'), query, t, num,
                         content=spane,
                         nav_menus=[SearchSortMenu(default=sort)],
                         search_params=dict(sort=sort),
                         infotext=cleanup_message,
                         simple=False, site=c.site,
                         restrict_sr=restrict_sr).render()

        return res
    except (IndextankException, socket.error), e:
        return self.search_fail(e)

def intermediate_redirect(cls, form_path):
    """
    Generates a /login or /over18 redirect from the current
    fullpath, after having properly reformatted the path via
    format_output_url.  The reformatted original url is encoded
    and added as the "dest" parameter of the new url.
    """
    from r2.lib.template_helpers import add_sr
    dest = cls.format_output_url(request.fullpath)
    path = add_sr(form_path + query_string({"dest": dest}))
    return cls.redirect(path)

def build(self, base_path=''):
    base_path = ("%s/%s/" % (base_path, self.dest)).replace('//', '/')
    self.bare_path = _force_unicode(base_path.replace('//', '/')).lower()
    self.bare_path = self.bare_path.rstrip('/')
    self.base_path = base_path

    if self.use_params:
        base_path += query_string(dict(request.GET))

    # since we've been sloppy about keeping track of "//", get rid
    # of any that may be present
    self.path = base_path.replace('//', '/')

def intermediate_redirect(cls, form_path):
    """
    Generates a /login or /over18 redirect from the current
    fullpath, after having properly reformatted the path via
    format_output_url.  The reformatted original url is encoded
    and added as the "dest" parameter of the new url.
    """
    from r2.lib.template_helpers import add_sr
    params = dict(dest=cls.format_output_url(request.fullurl))
    if c.extension == "widget" and request.GET.get("callback"):
        params['callback'] = request.GET.get("callback")
    path = add_sr(cls.format_output_url(form_path) + query_string(params))
    abort(302, location=path)

def GET_search(self, query, num, reverse, after, count, sort):
    """Search links page."""
    if query and '.' in query:
        url = sanitize_url(query, require_scheme=True)
        if url:
            return self.redirect("/submit" + query_string({'url': url}))

    q = IndextankQuery(query, c.site, sort)

    num, t, spane = self._search(q, num=num, after=after,
                                 reverse=reverse, count=count)

    if not isinstance(c.site, FakeSubreddit) and not c.cname:
        all_reddits_link = "%s/search%s" % (subreddit.All.path,
                                            query_string({'q': query}))
        d = {'reddit_name': c.site.name,
             'reddit_link': "http://%s/" % get_domain(cname=c.cname),
             'all_reddits_link': all_reddits_link}
        infotext = strings.searching_a_reddit % d
    else:
        infotext = None

    res = SearchPage(_('search results'), query, t, num,
                     content=spane,
                     nav_menus=[SearchSortMenu(default=sort)],
                     search_params=dict(sort=sort),
                     infotext=infotext).render()

    return res

def GET_search(self, query, num, reverse, after, count, sort, restrict_sr):
    """Search links page."""
    if query and '.' in query:
        url = sanitize_url(query, require_scheme=True)
        if url:
            return self.redirect("/submit" + query_string({'url': url}))

    if not restrict_sr:
        site = DefaultSR()
    else:
        site = c.site

    try:
        cleanup_message = None
        try:
            q = IndextankQuery(query, site, sort)
            num, t, spane = self._search(q, num=num, after=after,
                                         reverse=reverse, count=count)
        except InvalidIndextankQuery:
            # strip the query down to a whitelist
            cleaned = re.sub("[^\w\s]+", "", query)
            cleaned = cleaned.lower()

            # if it was nothing but mess, we have to stop
            if not cleaned.strip():
                num, t, spane = 0, 0, []
                cleanup_message = strings.completely_invalid_search_query
            else:
                q = IndextankQuery(cleaned, site, sort)
                num, t, spane = self._search(q, num=num, after=after,
                                             reverse=reverse, count=count)
                cleanup_message = strings.invalid_search_query % {
                                      "clean_query": cleaned}
                cleanup_message += " "
                cleanup_message += strings.search_help % {
                                      "search_help": self.search_help_page}

        res = SearchPage(_('search results'), query, t, num,
                         content=spane,
                         nav_menus=[SearchSortMenu(default=sort)],
                         search_params=dict(sort=sort),
                         infotext=cleanup_message,
                         simple=False, site=c.site,
                         restrict_sr=restrict_sr).render()

        return res
    except (IndextankException, socket.error), e:
        return self.search_fail(e)

def intermediate_redirect(cls, form_path):
    """
    Generates a /login or /over18 redirect from the current
    fullpath, after having properly reformatted the path via
    format_output_url.  The reformatted original url is encoded
    and added as the "dest" parameter of the new url.
    """
    from r2.lib.template_helpers import add_sr
    params = dict(dest=cls.format_output_url(request.fullpath))
    if c.extension == "widget" and request.GET.get("callback"):
        params["callback"] = request.GET.get("callback")
    path = add_sr(cls.format_output_url(form_path) + query_string(params))
    return cls.redirect(path)

def intermediate_redirect(cls, form_path, sr_path=True, fullpath=None):
    """
    Generates a /login or /over18 redirect from the specified or current
    fullpath, after having properly reformatted the path via
    format_output_url.  The reformatted original url is encoded
    and added as the "dest" parameter of the new url.
    """
    from r2.lib.template_helpers import add_sr
    params = dict(dest=cls.format_output_url(fullpath or request.fullurl))
    if c.extension == "widget" and request.GET.get("callback"):
        params['callback'] = request.GET.get("callback")
    path = add_sr(cls.format_output_url(form_path) + query_string(params),
                  sr_path=sr_path)
    abort(302, location=path)

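# Illustrative note (not part of the original source): calling, say,
# intermediate_redirect("/over18") while viewing a hypothetical page would
# issue a 302 to something roughly like
#     /over18?dest=<encoded original url>
# with query_string() supplying the "?dest=..." portion and add_sr()
# prepending the subreddit prefix when sr_path is True.
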
def GET_s(self, urloid):
    """/s/http://..., show a given URL with the toolbar. if it's
       submitted, redirect to /tb/$id36"""
    force_html()
    path = demangle_url(request.fullpath)

    if not path:
        # it was malformed
        self.abort404()

    # if the domain is shame-banned, bail out.
    if is_shamed_domain(path)[0]:
        self.abort404()

    listing = hot_links_by_url_listing(path, sr=c.site, num=1)
    link = listing.things[0] if listing.things else None

    if c.cname and not c.authorized_cname:
        # In this case, we make some bad guesses caused by the
        # cname frame on unauthorised cnames.
        # 1. User types http://foo.com/http://myurl?cheese=brie
        #    (where foo.com is an unauthorised cname)
        # 2. We generate a frame that points to
        #    http://www.reddit.com/r/foo/http://myurl?cnameframe=0.12345&cheese=brie
        # 3. Because we accept everything after the /r/foo/, and
        #    we've now parsed, modified, and reconstituted that
        #    URL to add cnameframe, we really can't make any good
        #    assumptions about what we've done to a potentially
        #    already broken URL, and we can't assume that we've
        #    rebuilt it in the way that it was originally
        #    submitted (if it was)
        # We could try to work around this with more guesses (by
        # having demangle_url try to remove that param, hoping
        # that it's not already a malformed URL, and that we
        # haven't re-ordered the GET params, removed
        # double-slashes, etc), but for now, we'll just refuse to
        # do this operation
        return self.abort404()

    if link:
        # we were able to find it, let's send them to the
        # toolbar (if enabled) or comments (if not)
        return self.redirect(add_sr("/tb/" + link._id36))
    else:
        # It hasn't been submitted yet. Give them a chance to submit it.
        qs = utils.query_string({"url": path})
        return self.redirect(add_sr("/submit" + qs))

def verify_email(user, dest):
    """For verifying an email address"""
    from r2.lib.pages import VerifyEmail
    key = passhash(user.name, user.email)
    user.email_verified = False
    user._commit()

    emaillink = ('http://' + g.domain + '/verification/' + key
                 + query_string(dict(dest=dest)))
    print "Generated email verification link: " + emaillink
    g.cache.set("email_verify_%s" % key, user._id, time=1800)

    _system_email(user.email,
                  VerifyEmail(user=user,
                              emaillink=emaillink).render(style='email'),
                  Email.Kind.VERIFY_EMAIL)

def verify_email(user, dest):
    """For verifying an email address"""
    from r2.lib.pages import VerifyEmail
    user.email_verified = False
    user._commit()
    Award.take_away("verified_email", user)

    token = EmailVerificationToken._new(user)
    emaillink = ('http://' + g.domain + '/verification/' + token._id
                 + query_string(dict(dest=dest)))
    g.log.debug("Generated email verification link: " + emaillink)

    _system_email(user.email,
                  VerifyEmail(user=user,
                              emaillink=emaillink).render(style='email'),
                  Email.Kind.VERIFY_EMAIL)

def verify_email(user, dest):
    """For verifying an email address"""
    from r2.lib.pages import VerifyEmail
    key = passhash(user.name, user.email)
    user.email_verified = False
    user._commit()
    Award.take_away("verified_email", user)

    emaillink = ('http://' + g.domain + '/verification/' + key
                 + query_string(dict(dest=dest)))
    g.log.debug("Generated email verification link: " + emaillink)
    g.cache.set("email_verify_%s" % key, user._id, time=1800)

    _system_email(user.email,
                  VerifyEmail(user=user,
                              emaillink=emaillink).render(style='email'),
                  Email.Kind.VERIFY_EMAIL)

def verify_email(user, dest):
    """For verifying an email address"""
    from r2.lib.pages import VerifyEmail
    user.email_verified = False
    user._commit()
    Award.take_away("verified_email", user)

    token = EmailVerificationToken._new(user)
    emaillink = ('http://' + g.domain + '/verification/' + token._id
                 + query_string(dict(dest=dest)))
    g.log.debug("Generated email verification link: " + emaillink)

    _system_email(
        user.email,
        VerifyEmail(user=user, emaillink=emaillink).render(style='email'),
        Email.Kind.VERIFY_EMAIL)

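# Illustrative note (not part of the original source): with g.domain set to,
# say, "example.com" and dest "/prefs/", the link built above looks roughly
# like
#     http://example.com/verification/<token id>?dest=%2Fprefs%2F
# since query_string() supplies the leading "?" and urlencodes the dest value.
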
def GET_search(self, query, num, reverse, after, count, sort, restrict_sr):
    """Search links page."""
    if query and '.' in query:
        url = sanitize_url(query, require_scheme=True)
        if url:
            return self.redirect("/submit" + query_string({'url': url}))

    if not restrict_sr:
        site = DefaultSR()
    else:
        site = c.site

    try:
        cleanup_message = None
        try:
            q = IndextankQuery(query, site, sort)
            num, t, spane = self._search(q, num=num, after=after,
                                         reverse=reverse, count=count)
        except InvalidIndextankQuery:
            # delete special characters from the query and run again
            special_characters = '+-&|!(){}[]^"~*?:\\'
            translation = dict((ord(char), None)
                               for char in list(special_characters))
            cleaned = query.translate(translation)

            q = IndextankQuery(cleaned, site, sort)
            num, t, spane = self._search(q, num=num, after=after,
                                         reverse=reverse, count=count)
            cleanup_message = _('I couldn\'t understand your query, ' +
                                'so I simplified it and searched for ' +
                                '"%(clean_query)s" instead.') % {
                                    'clean_query': cleaned}

        res = SearchPage(_('search results'), query, t, num,
                         content=spane,
                         nav_menus=[SearchSortMenu(default=sort)],
                         search_params=dict(sort=sort),
                         infotext=cleanup_message,
                         simple=False, site=c.site,
                         restrict_sr=restrict_sr).render()

        return res
    except (IndextankException, socket.error), e:
        return self.search_fail(e)

def update_qstring(self, dict):
    merged = copy(request.get)
    merged.update(dict)
    return request.path + utils.query_string(merged)

def pre(self):
    g.cache.caches = (LocalCache(),) + g.cache.caches[1:]

    #check if user-agent needs a dose of rate-limiting
    ratelimit_agents()

    c.domain = g.domain
    c.response_wrappers = []
    c.errors = ErrorSet()
    c.firsttime = firsttime()
    (c.user, maybe_admin) = \
        valid_cookie(request.cookies.get(g.login_cookie))

    if c.user:
        c.user_is_loggedin = True
    else:
        c.user = UnloggedUser(get_browser_langs())
        c.user._load()

    if c.user_is_loggedin:
        if not c.user._loaded:
            c.user._load()
        c.modhash = c.user.modhash()
        if request.method.lower() == 'get':
            read_click_cookie()
            read_mod_cookie()
        if hasattr(c.user, 'msgtime') and c.user.msgtime:
            c.have_messages = c.user.msgtime
        c.user_is_admin = maybe_admin and c.user.name in g.admins

    c.over18 = over18()

    #set_browser_langs()
    set_host_lang()
    set_subreddit()
    set_content_type()
    set_iface_lang()
    set_content_lang()

    # check if the user has access to this subreddit
    if not c.site.can_view(c.user):
        abort(403, "forbidden")

    #check over 18
    if c.site.over_18 and not c.over18:
        d = dict(dest=reddit_link(request.path, url=True) +
                 utils.query_string(request.GET))
        return redirect_to("/over18" + utils.query_string(d))

    #check content cache
    if not c.user_is_loggedin:
        r = cache.get(self.request_key())
        if r and request.method == 'GET':
            response = c.response
            response.headers = r.headers
            response.content = r.content
            response.status_code = r.status_code
            request.environ['pylons.routes_dict']['action'] = 'cached_response'
            # make sure to carry over the content type
            c.response_content_type = r.headers['content-type']
            if r.headers.has_key('access-control'):
                c.response_access_control = r.headers['access-control']
            c.used_cache = True
            # response wrappers have already been applied before cache write
            c.response_wrappers = []

def POST_comments(self, article, comment, context, sort, limit, depth):
    # VMenu validator will save the value of sort before we reach this
    # point. Now just redirect to GET mode.
    return self.redirect(request.fullpath + query_string(dict(sort=sort)))

def _to_referer(self, *a, **kw):
    res = func(self, *a, **kw)
    dest = res.get('redirect') or request.referer or '/'
    return self.redirect(dest + query_string(params))

def make_qs(self, **kw):
    """Convert the provided kw into a kw string suitable for app.post."""
    return query_string(kw).lstrip("?")

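# Illustrative usage (not part of the original source): query_string() returns
# its output with a leading "?", so stripping it leaves a bare query string,
# e.g. make_qs(user="alice", op="save") -> "user=alice&op=save"
# (parameter order may vary, since the kwargs arrive as a dict).
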
def GET_login(self, *a, **kw):
    return self.redirect('/login' + query_string(dict(dest="/")))