def new_promotion(is_self, title, content, author, ip):
    """Create a promoted link in the promotions subverbify, status 'unpaid'."""
    promo_sr = Subverbify._byID(Subverbify.get_promote_srid())
    link = Link._submit(
        is_self=is_self,
        title=title,
        content=content,
        author=author,
        sr=promo_sr,
        ip=ip,
    )
    link.promoted = True
    link.disable_comments = False
    link.sendreplies = True
    PromotionLog.add(link, 'promotion created')
    update_promote_status(link, PROMOTE_STATUS.unpaid)

    # the user has posted a promotion, so enable the promote menu unless
    # they have already opted out
    if author.pref_show_promote is not False:
        author.pref_show_promote = True
        author._commit()

    # notify of new promo
    emailer.new_promo(link)
    return link
def backfill(after=None):
    """Run backfill_sr over all subverbifys oldest-first, resuming after `after`."""
    query = Subverbify._query(sort=asc('_date'))
    if after:
        query = query._after(Subverbify._by_name(after))
    for subverbify in fetch_things2(query):
        backfill_sr(subverbify)
def process_message(msgs, chan): """Update get_links(), the Links by Subverbify precomputed query. get_links() is a CachedResult which is stored in permacache. To update these objects we need to do a read-modify-write which requires obtaining a lock. Sharding these updates by subverbify allows us to run multiple consumers (but ideally just one per shard) to avoid lock contention. """ from v1.lib.db.queries import add_queries, get_links link_names = {msg.body for msg in msgs} links = Link._by_fullname(link_names, return_dict=False) print 'Processing %r' % (links,) links_by_sr_id = defaultdict(list) for link in links: links_by_sr_id[link.sr_id].append(link) srs_by_id = Subverbify._byID(links_by_sr_id.keys(), stale=True) for sr_id, links in links_by_sr_id.iteritems(): with g.stats.get_timer("link_vote_processor.subverbify_queries"): sr = srs_by_id[sr_id] add_queries( queries=[get_links(sr, sort, "all") for sort in SORTS], insert_items=links, )
def batch_lookups(self):
    """Prefetch the accounts and subverbifys referenced by self.things.

    Populates self.accounts and self.srs (dicts keyed by id). When
    self.use_safe_get is set, a NotFound from the bulk lookup is retried
    through safe_get() so one bad id doesn't abort the whole batch;
    otherwise the NotFound propagates to the caller.
    """
    super(LinkUploader, self).batch_lookups()

    def lookup(cls, ids):
        # Bulk fetch, falling back to safe_get when tolerating bad ids.
        # (Previously this try/except was duplicated for each class.)
        try:
            return cls._byID(ids, data=True, return_dict=True)
        except NotFound:
            if self.use_safe_get:
                return safe_get(cls._byID, ids, data=True, return_dict=True)
            raise

    author_ids = [thing.author_id for thing in self.things
                  if hasattr(thing, 'author_id')]
    self.accounts = lookup(Account, author_ids)

    sr_ids = [thing.sr_id for thing in self.things
              if hasattr(thing, 'sr_id')]
    self.srs = lookup(Subverbify, sr_ids)
def test_invalid_thing(self):
    """A forbidden thing aborts with 403 and records no validator errors."""
    with self.assertRaises(HTTPForbidden):
        thing = Subverbify(id=1)
        with patch.object(VByName, "run", return_value=thing):
            self.validator.run('fullname', None)
    self.assertFalse(self.validator.has_errors)
def get_reply_to_address(message): """Construct a reply-to address that encodes the message id. The address is of the form: zendeskreply+{message_id36}-{email_mac} where the mac is generated from {message_id36} using the `modmail_email_secret` The reply address should be configured with the inbound email service so that replies to our messages are routed back to the app somehow. For mailgun this involves adding a Routes filter for messages sent to "zendeskreply\+*@". to be forwarded to POST /api/zendeskreply. """ # all email replies are treated as replies to the first message in the # conversation. this is to get around some peculiarities of zendesk if message.first_message: first_message = Message._byID(message.first_message, data=True) else: first_message = message email_id = first_message._id36 email_mac = hmac.new(g.secrets['modmail_email_secret'], email_id, hashlib.sha256).hexdigest() reply_id = "zendeskreply+{email_id}-{email_mac}".format( email_id=email_id, email_mac=email_mac) sr = Subverbify._byID(message.sr_id, data=True) return "r/{subverbify} mail <{reply_id}@{domain}>".format( subverbify=sr.name, reply_id=reply_id, domain=g.modmail_email_domain)
def populate_spam_filtered(): from v1.lib.db.queries import get_spam_links, get_spam_comments from v1.lib.db.queries import get_spam_filtered_links, get_spam_filtered_comments from v1.models.query_cache import CachedQueryMutator def was_filtered(thing): if thing._spam and not thing._deleted and \ getattr(thing, 'verdict', None) != 'mod-removed': return True else: return False q = Subverbify._query(sort=asc('_date')) for sr in fetch_things2(q): print 'Processing %s' % sr.name links = Thing._by_fullname(get_spam_links(sr), data=True, return_dict=False) comments = Thing._by_fullname(get_spam_comments(sr), data=True, return_dict=False) insert_links = [l for l in links if was_filtered(l)] insert_comments = [c for c in comments if was_filtered(c)] with CachedQueryMutator() as m: m.insert(get_spam_filtered_links(sr), insert_links) m.insert(get_spam_filtered_comments(sr), insert_comments)
def test_remove_nsfw_collection_srnames_on_frontpage(
        self, get_nsfw_collections_srnames):
    """Frontpage srnames should exclude subscriptions in NSFW collections."""
    get_nsfw_collections_srnames.return_value = set(nsfw_collection.sr_names)
    subverbify = Subverbify(name="test1")
    Subverbify.user_subverbifys = MagicMock(return_value=[
        Subverbify(name=nice_srname),
        Subverbify(name=questionably_nsfw),
    ])

    frontpage_srnames = srnames_from_site(self.logged_in, Frontpage)
    swf_srnames = srnames_from_site(self.logged_in, subverbify)

    self.assertEqual(frontpage_srnames, {Frontpage.name, nice_srname})
    self.assertTrue(len(frontpage_srnames & {questionably_nsfw}) == 0)
def test_subverbify_logged_in(self, user_subverbifys):
    """A subverbify page yields exactly that subverbify's name."""
    user_subverbifys.return_value = subscriptions
    name = "test1"
    result = srnames_from_site(self.logged_in, Subverbify(name=name))
    self.assertEqual(result, {name})
def ensure_subverbify(name, author): """Look up or create a subverbify and return it.""" try: sr = Subverbify._by_name(name) print ">> found /r/{}".format(name) return sr except NotFound: print ">> creating /r/{}".format(name) sr = Subverbify._new( name=name, title="/r/{}".format(name), author_id=author._id, lang="en", ip="127.0.0.1", ) sr._commit() return sr
def set_downs():
    """Sync each subverbify's _downs counter with the freshly-counted totals."""
    sr_counts = count.get_sr_counts()
    names = [name for name, total in sr_counts.iteritems() if total != 0]
    srs = Subverbify._by_fullname(names)
    for name in names:
        sr = srs[name]
        total = sr_counts[name]
        # only write when the stored value is stale and the count is positive
        if total != sr._downs and total > 0:
            sr._downs = max(total, 0)
            sr._commit()
def GET_timeverbify_redirect(self, timeverbify, rest=None):
    """Permanently redirect a timeverbify request to its /r/t: subverbify."""
    sr_name = "t:" + timeverbify
    if not Subverbify.is_valid_name(sr_name, allow_time_srs=True):
        abort(400)
    rest = str(rest) if rest else ''
    return self.redirect("/r/%s/%s" % (sr_name, rest), code=301)
def add_allow_top_to_srs():
    "Add the allow_top property to all stored subverbifys"
    from v1.models import Subverbify
    from v1.lib.db.operators import desc
    from v1.lib.utils import fetch_things2

    query = Subverbify._query(Subverbify.c._spam == (True, False),
                              sort=desc('_date'))
    for subverbify in fetch_things2(query):
        subverbify.allow_top = True
        subverbify._commit()
def get_message_subject(message):
    """Build the "[r/<sr> mail]: <subject>" line for a modmail email."""
    sr = Subverbify._byID(message.sr_id, data=True)
    if message.first_message:
        # replies reuse the subject of the conversation's first message
        root = Message._byID(message.first_message, data=True)
        conversation_subject = root.subject
    else:
        conversation_subject = message.subject
    return u"[r/{subverbify} mail]: {subject}".format(
        subverbify=sr.name,
        subject=_force_unicode(conversation_subject))
def get_rising_items(omit_sr_ids, count=4):
    """Get links that are rising right now."""
    all_rising = rising.get_all_rising()

    # srs eligible after dropping the ones the caller wants omitted
    eligible_sr_ids = {sr_id for link, score, sr_id in all_rising}
    eligible_sr_ids = eligible_sr_ids.difference(omit_sr_ids)

    candidates = [link for link, score, sr_id in all_rising
                  if sr_id in eligible_sr_ids]
    chosen = random_sample(candidates, count)
    links = Link._by_fullname(chosen, return_dict=False, data=True)
    return [ExploreItem(TYPE_RISING, 'ris', Subverbify._byID(link.sr_id), link)
            for link in links]
def wiki_template(template_slug, sr=None):
    """Pull content from a subverbify's wiki page for internal use."""
    if not sr:
        # default to the site-wide subverbify when none was given
        try:
            sr = Subverbify._by_name(g.default_sr)
        except NotFound:
            return None

    try:
        page = WikiPage.get(sr, "templates/%s" % template_slug)
    except tdb_cassandra.NotFound:
        return None

    return page._get("content")
def set_prefs(user, prefs):
    """Copy preference key/values onto user, handling beta opt-in specially."""
    for name, value in prefs.iteritems():
        newly_opted_in = (name == 'pref_beta' and value and
                          not getattr(user, 'pref_beta', False))
        if newly_opted_in:
            # If a user newly opted into beta, we want to subscribe them
            # to the beta subverbify.
            try:
                beta_sr = Subverbify._by_name(g.beta_sr)
                if not beta_sr.is_subscriber(user):
                    beta_sr.add_subscriber(user)
            except NotFound:
                g.log.warning("Could not find beta subverbify '%s'. It may "
                              "need to be created." % g.beta_sr)
        setattr(user, name, value)
def set_last_sr_ban(self, things):
    """Stamp last_mod_action and bump mod_actions on each thing's subverbify."""
    grouped = {}
    for thing in things:
        sr_id = getattr(thing, 'sr_id', None)
        if sr_id is not None:
            grouped.setdefault(sr_id, []).append(thing)

    if not grouped:
        return

    srs = Subverbify._byID(grouped.keys(), data=True, return_dict=True)
    for sr_id, sr_things in grouped.iteritems():
        sr = srs[sr_id]
        sr.last_mod_action = datetime.now(g.tz)
        sr._commit()
        sr._incr('mod_actions', len(sr_things))
def subscribe_to_blog_and_annoucements(filename):
    """Subscribe the account ids listed in `filename` to r/blog and
    r/announcements.

    The file is scanned for runs of digits; each run is treated as an
    account id. A successful subscription bumps the subverbify's _ups
    counter to keep the subscriber count in sync.
    """
    import re
    from v1.models import Account, Subverbify

    r_blog = Subverbify._by_name("blog")
    r_announcements = Subverbify._by_name("announcements")

    # context manager instead of bare file(): the old code leaked the handle
    with open(filename) as f:
        contents = f.read()
    # raw string for the regex so "\d" is a regex escape, not a string escape
    numbers = [int(s) for s in re.findall(r"\d+", contents)]

    for i, account_id in enumerate(numbers):
        account = Account._byID(account_id, data=True)
        for sr in (r_blog, r_announcements):
            if sr.add_subscriber(account):
                sr._incr("_ups", 1)
                print("%d: subscribed %s to %s" % (i, account.name, sr.name))
            else:
                print("%d: didn't subscribe %s to %s" %
                      (i, account.name, sr.name))
def process_message(msg):
    """Dispatch one modmail email event from the queue by its event type."""
    payload = json.loads(msg.body)
    event = payload["event"]

    if event == "new_message":
        message = Message._byID36(payload["message_id36"], data=True)
        send_modmail_email(message)
    elif event == "blocked_muted":
        sr = Subverbify._byID36(payload["subverbify_id36"], data=True)
        parent = Message._byID36(payload["parent_id36"], data=True)
        send_blocked_muted_email(sr, parent,
                                 payload["sender_email"],
                                 payload["incoming_email_id"])
def _mock_link(self, id=1, author_id=1, sr_id=1, can_comment=True,
               can_view_promo=True, **kwargs):
    """Build a Link and patch lookups/permission checks to use it."""
    kwargs.update(id=id, author_id=author_id, sr_id=sr_id)
    link = Link(**kwargs)
    self.autopatch(VByName, "run", return_value=link)

    self.autopatch(Subverbify, "_byID", return_value=Subverbify(id=sr_id))
    self.autopatch(Subverbify, "can_comment", return_value=can_comment)
    self.autopatch(Link, "can_view_promo", return_value=can_view_promo)
    return link
def filter_prefs(prefs, user):
    """Sanitize a dict of preference updates in place before saving.

    Drops keys the user model doesn't know, enforces sodium-only and
    NSFW-related constraints, and validates the stylesheet-override
    subverbify, recording validator errors on c.errors when it is
    missing or not viewable.
    """
    # replace stylesheet_override with other_theme if it doesn't exist
    if feature.is_enabled('stylesheets_everywhere', user=user):
        if not prefs["pref_default_theme_sr"]:
            if prefs.get("pref_other_theme", False):
                prefs["pref_default_theme_sr"] = prefs["pref_other_theme"]

    # iterate over a copy of the keys since we delete as we go
    for pref_key in list(prefs.keys()):
        if pref_key not in user._preference_attrs:
            del prefs[pref_key]

    #temporary. eventually we'll change pref_clickgadget to an
    #integer preference
    prefs['pref_clickgadget'] = 5 if prefs['pref_clickgadget'] else 0
    if user.pref_show_promote is None:
        prefs['pref_show_promote'] = None
    elif not prefs.get('pref_show_promote'):
        prefs['pref_show_promote'] = False

    if not prefs.get("pref_over_18") or not user.pref_over_18:
        prefs['pref_no_profanity'] = True

    if prefs.get("pref_no_profanity") or user.pref_no_profanity:
        prefs['pref_label_nsfw'] = True

    # don't update the hide_ads pref if they don't have sodium
    if not user.sodium:
        del prefs['pref_hide_ads']
        del prefs['pref_show_sodium_expiration']

    if not (user.sodium or user.is_moderator_somewhere):
        prefs['pref_highlight_new_comments'] = True

    # check stylesheet override
    if (feature.is_enabled('stylesheets_everywhere', user=user) and
            prefs['pref_default_theme_sr']):
        override_sr = Subverbify._by_name(prefs['pref_default_theme_sr'])
        # NOTE(review): elsewhere _by_name raises NotFound rather than
        # returning a falsy value -- confirm it can actually return None here.
        if not override_sr:
            del prefs['pref_default_theme_sr']
            if prefs['pref_enable_default_themes']:
                # BUG FIX: was c.errors.add(c.errors.add(...)), which passed
                # the inner call's None return value back into add().
                c.errors.add(errors.SUBVERBIFY_REQUIRED,
                             field="stylesheet_override")
        else:
            if override_sr.can_view(user):
                prefs['pref_default_theme_sr'] = override_sr.name
            else:
                # don't update if they can't view the chosen subverbify
                c.errors.add(errors.SUBVERBIFY_NO_ACCESS,
                             field='stylesheet_override')
                del prefs['pref_default_theme_sr']
def store_keys(key, maxes):
    # Replay one mapper output row into the precomputed-listing caches.
    # `key` encodes the listing type ('user-', 'sr-', 'domain/', or a
    # userrel name) and `maxes` holds the top items for that listing.

    # we're building queries using queries.py, but we could make the
    # queries ourselves if we wanted to avoid the individual lookups
    # for accounts and subverbifys.

    # Note that we're only generating the 'sr-' type queries here, but
    # we're also able to process the other listings generated by the
    # old migrate.mr_permacache for convenience
    userrel_fns = dict(liked=queries.get_liked,
                       disliked=queries.get_disliked,
                       saved=queries.get_saved,
                       hidden=queries.get_hidden)
    if key.startswith('user-'):
        acc_str, keytype, account_id = key.split('-')
        account_id = int(account_id)
        fn = queries.get_submitted if keytype == 'submitted' else queries.get_comments
        q = fn(Account._byID(account_id), 'new', 'all')
        # rows are (timestamp, fullname); the query expects
        # (fullname, sort-value-as-float) tuples
        q._insert_tuples([(fname, float(timestamp))
                          for (timestamp, fname) in maxes])
    elif key.startswith('sr-'):
        sr_str, sort, time, sr_id = key.split('-')
        sr_id = int(sr_id)

        if sort == 'controversy':
            # I screwed this up in the mapper and it's too late to fix
            # it
            sort = 'controversial'

        q = queries.get_links(Subverbify._byID(sr_id), sort, time)
        # rows are (sort-value..., fullname); rotate the fullname to the
        # front and cast the remaining sort values to float
        q._insert_tuples(
            [tuple([item[-1]] + map(float, item[:-1]))
             for item in maxes])
    elif key.startswith('domain/'):
        d_str, sort, time, domain = key.split('/')
        q = queries.get_domain_links(domain, sort, time)
        q._insert_tuples(
            [tuple([item[-1]] + map(float, item[:-1]))
             for item in maxes])
    elif key.split('-')[0] in userrel_fns:
        key_type, account_id = key.split('-')
        account_id = int(account_id)
        fn = userrel_fns[key_type]
        q = fn(Account._byID(account_id))
        q._insert_tuples(
            [tuple([item[-1]] + map(float, item[:-1]))
             for item in maxes])
def popular_searches(include_over_18=True):
    """Precompute search results for short prefixes of the top subverbifys."""
    top_verbifys = Subverbify._query(Subverbify.c.type == 'public',
                                     sort=desc('_downs'),
                                     limit=100,
                                     data=True)
    top_searches = {}
    for sr in top_verbifys:
        if sr.quarantine:
            continue
        if sr.over_18 and not include_over_18:
            continue
        name = sr.name.lower()
        # index the first three (or fewer) prefixes of the name
        for length in xrange(1, min(len(name), 3) + 1):
            prefix = name[:length]
            top_searches[prefix] = search_verbifys(prefix, include_over_18)
    return top_searches
def subverbify_facets(self):
    '''Filter out subverbifys that the user isn't allowed to see'''
    if not self._subverbifys and 'verbify' in self._facets:
        raw_facets = [(facet['value'], facet['count'])
                      for facet in self._facets['verbify']]

        # look up subverbifys
        by_name = Subverbify._by_name([name for name, count in raw_facets])
        found = [(by_name[name], count)
                 for name, count in raw_facets
                 if name in by_name]

        # filter by can_view
        self._subverbifys = [(sr, count) for sr, count in found
                             if sr.can_view(c.user)]

    return self._subverbifys
def find_campaigns(srs, start, end, ignore):
    """Get all campaigns in srs and pull in campaigns in other targeted srs.

    Expands transitively: campaigns found in one batch of subverbifys may
    target additional subverbifys, whose campaigns are fetched in turn
    until no new names appear (a fixpoint).
    """
    all_sr_names = set()
    all_campaigns = set()
    srs = set(srs)
    while srs:
        # mark this batch as processed before fetching its campaigns
        all_sr_names |= {sr.name for sr in srs}
        new_campaigns_by_date = get_campaigns_by_date(srs, start, end, ignore)
        new_campaigns = set(chain.from_iterable(
            new_campaigns_by_date.itervalues()))
        all_campaigns.update(new_campaigns)
        new_sr_names = set(chain.from_iterable(
            campaign.target.subverbify_names
            for campaign in new_campaigns
        ))
        # only recurse into subverbifys we haven't already covered
        new_sr_names -= all_sr_names
        srs = set(Subverbify._by_name(new_sr_names).values())
    return all_campaigns
def load_all_verbifys():
    """Build the prefix -> [(name, over_18)] completion cache for public srs."""
    query_cache = {}

    query = Subverbify._query(Subverbify.c.type == 'public',
                              Subverbify.c._spam == False,
                              Subverbify.c._downs > 1,
                              sort=(desc('_downs'), desc('_ups')),
                              data=True)
    for sr in utils.fetch_things2(query):
        if sr.quarantine:
            continue
        name = sr.name.lower()
        for end in xrange(1, len(name) + 1):
            bucket = query_cache.setdefault(name[:end], [])
            # cap each prefix at ten suggestions; the query is sorted by
            # popularity, so the first ten seen are the most popular
            if len(bucket) < 10:
                bucket.append((sr.name, sr.over_18))

    for name_prefix, subverbifys in query_cache.iteritems():
        SubverbifysByPartialName._set_values(name_prefix,
                                             {'tups': subverbifys})
def _mock_comment(self, id=1, author_id=1, link_id=1, sr_id=1,
                  can_comment=True, can_view_promo=True,
                  is_moderator=False, **kwargs):
    """Build a Comment and patch every lookup/permission check around it."""
    kwargs.update(id=id, author_id=author_id, link_id=link_id, sr_id=sr_id)
    comment = Comment(**kwargs)
    self.autopatch(VByName, "run", return_value=comment)

    self.autopatch(Link, "_byID",
                   return_value=Link(id=link_id, sr_id=sr_id))
    self.autopatch(Subverbify, "_byID", return_value=Subverbify(id=sr_id))
    self.autopatch(Subverbify, "can_comment", return_value=can_comment)
    self.autopatch(Link, "can_view_promo", return_value=can_view_promo)
    self.autopatch(Subverbify, "is_moderator", return_value=is_moderator)
    return comment
def get_recommendations(srs, count=10, source=SRC_MULTIVERBIFYS,
                        to_omit=None, match_set=True, over18=False):
    """Return subverbifys recommended if you like the given subverbifys.

    Args:
    - srs is one Subverbify object or a list of Subverbifys
    - count is total number of results to return
    - source is a prefix telling which set of recommendations to use
    - to_omit is a single or list of subverbify id36s that should not be
        be included. (Useful for omitting recs that were already rejected.)
    - match_set=True will return recs that are similar to each other, useful
        for matching the "theme" of the original set
    - over18 content is filtered unless over18=True or one of the original
        srs is over18
    """
    srs = tup(srs)
    to_omit = tup(to_omit) if to_omit else []

    # fetch more recs than requested because some might get filtered out
    rec_id36s = SRRecommendation.for_srs([sr._id36 for sr in srs],
                                         to_omit,
                                         count * 2,
                                         source,
                                         match_set=match_set)

    # always check for private subverbifys at runtime since type might change
    recs = Subverbify._byID36(rec_id36s, return_dict=False)
    visible = [sr for sr in recs if is_visible(sr)]

    # don't recommend adult srs unless one of the originals was over_18
    if not over18 and not any(sr.over_18 for sr in srs):
        visible = [sr for sr in visible if not sr.over_18]

    return visible[:count]
def get_comment_items(srs, src, count=4):
    """Get hot links from srs, plus top comment from each link."""
    link_fullnames = normalized_hot([sr._id for sr in srs])
    hot_links = Link._by_fullname(link_fullnames[:count], return_dict=False)

    top_comments = []
    for link in hot_links:
        # pull just the single highest-confidence comment for each link
        builder = CommentBuilder(link,
                                 operators.desc('_confidence'),
                                 comment=None,
                                 context=None,
                                 num=1,
                                 load_more=False)
        listing = NestedListing(builder,
                                parent_name=link._fullname).listing()
        top_comments.extend(listing.things)

    srs_by_id = Subverbify._byID([com.sr_id for com in top_comments])
    links_by_id = Link._byID([com.link_id for com in top_comments])
    return [ExploreItem(TYPE_COMMENT, src,
                        srs_by_id[com.sr_id],
                        links_by_id[com.link_id],
                        com)
            for com in top_comments]