def backfill(after=None):
    """Backfill every subverbify in creation order.

    If *after* is given, resume from just past the subverbify with that
    name instead of starting at the beginning.
    """
    query = Subverbify._query(sort=asc('_date'))
    if after:
        query = query._after(Subverbify._by_name(after))
    for subverbify in fetch_things2(query):
        backfill_sr(subverbify)
def set_prefs(user, prefs):
    """Set each entry of *prefs* as an attribute on *user*.

    A fresh opt-in to beta (pref_beta flipping from unset/false to true)
    additionally subscribes the user to the configured beta subverbify.
    """
    for name, value in prefs.iteritems():
        newly_opted_into_beta = (
            name == 'pref_beta' and value and
            not getattr(user, 'pref_beta', False))
        if newly_opted_into_beta:
            # If a user newly opted into beta, we want to subscribe them
            # to the beta subverbify.
            try:
                beta_sr = Subverbify._by_name(g.beta_sr)
                if not beta_sr.is_subscriber(user):
                    beta_sr.add_subscriber(user)
            except NotFound:
                g.log.warning("Could not find beta subverbify '%s'. It may "
                              "need to be created." % g.beta_sr)
        setattr(user, name, value)
def wiki_template(template_slug, sr=None):
    """Pull content from a subverbify's wiki page for internal use."""
    # Fall back to the default subverbify when no sr is supplied.
    if not sr:
        try:
            sr = Subverbify._by_name(g.default_sr)
        except NotFound:
            return None

    page_name = "templates/%s" % template_slug
    try:
        page = WikiPage.get(sr, page_name)
    except tdb_cassandra.NotFound:
        return None

    return page._get("content")
def subscribe_to_blog_and_annoucements(filename):
    """Subscribe the accounts listed in *filename* to /r/blog and /r/announcements.

    The file is scanned for runs of digits; each run is treated as an
    Account id. Progress is printed per account/subverbify pair.
    """
    import re
    from v1.models import Account, Subverbify

    r_blog = Subverbify._by_name("blog")
    r_announcements = Subverbify._by_name("announcements")

    # Use a context manager so the handle is closed deterministically;
    # the previous ``file(filename).read()`` leaked it until GC.
    with open(filename) as f:
        contents = f.read()
    # Raw string so the regex escape is explicit and lint-safe.
    numbers = [int(s) for s in re.findall(r"\d+", contents)]

    for i, account_id in enumerate(numbers):
        account = Account._byID(account_id, data=True)
        for sr in r_blog, r_announcements:
            if sr.add_subscriber(account):
                # Keep the denormalized subscriber count in sync.
                sr._incr("_ups", 1)
                print("%d: subscribed %s to %s" % (i, account.name, sr.name))
            else:
                # add_subscriber returns falsy if already subscribed.
                print("%d: didn't subscribe %s to %s" %
                      (i, account.name, sr.name))
def filter_prefs(prefs, user):
    """Normalize and validate a dict of preference updates in place.

    Drops keys that aren't recognized preference attributes, coerces
    dependent prefs (clickgadget, profanity/NSFW labeling, sodium-only
    prefs), and validates the stylesheet override subverbify, recording
    validation failures on ``c.errors``.
    """
    # replace stylesheet_override with other_theme if it doesn't exist
    if feature.is_enabled('stylesheets_everywhere', user=user):
        if not prefs["pref_default_theme_sr"]:
            if prefs.get("pref_other_theme", False):
                prefs["pref_default_theme_sr"] = prefs["pref_other_theme"]

    # Python 2 keys() returns a list, so deleting while iterating is safe.
    for pref_key in prefs.keys():
        if pref_key not in user._preference_attrs:
            del prefs[pref_key]

    #temporary. eventually we'll change pref_clickgadget to an
    #integer preference
    prefs['pref_clickgadget'] = 5 if prefs['pref_clickgadget'] else 0

    if user.pref_show_promote is None:
        prefs['pref_show_promote'] = None
    elif not prefs.get('pref_show_promote'):
        prefs['pref_show_promote'] = False

    # users not marked over 18 always have profanity filtered
    if not prefs.get("pref_over_18") or not user.pref_over_18:
        prefs['pref_no_profanity'] = True

    if prefs.get("pref_no_profanity") or user.pref_no_profanity:
        prefs['pref_label_nsfw'] = True

    # don't update the hide_ads pref if they don't have sodium
    if not user.sodium:
        del prefs['pref_hide_ads']
        del prefs['pref_show_sodium_expiration']

    if not (user.sodium or user.is_moderator_somewhere):
        prefs['pref_highlight_new_comments'] = True

    # check stylesheet override
    if (feature.is_enabled('stylesheets_everywhere', user=user) and
            prefs['pref_default_theme_sr']):
        # NOTE(review): elsewhere in this file _by_name raises NotFound
        # rather than returning a falsy value -- confirm this call site
        # can actually reach the `not override_sr` branch.
        override_sr = Subverbify._by_name(prefs['pref_default_theme_sr'])
        if not override_sr:
            del prefs['pref_default_theme_sr']
            if prefs['pref_enable_default_themes']:
                # BUG FIX: was c.errors.add(c.errors.add(...)), which
                # recorded the error and then re-added the inner call's
                # None return value as a second bogus error.
                c.errors.add(errors.SUBVERBIFY_REQUIRED,
                             field="stylesheet_override")
        else:
            if override_sr.can_view(user):
                prefs['pref_default_theme_sr'] = override_sr.name
            else:
                # don't update if they can't view the chosen subverbify
                c.errors.add(errors.SUBVERBIFY_NO_ACCESS,
                             field='stylesheet_override')
                del prefs['pref_default_theme_sr']
def ensure_subverbify(name, author): """Look up or create a subverbify and return it.""" try: sr = Subverbify._by_name(name) print ">> found /r/{}".format(name) return sr except NotFound: print ">> creating /r/{}".format(name) sr = Subverbify._new( name=name, title="/r/{}".format(name), author_id=author._id, lang="en", ip="127.0.0.1", ) sr._commit() return sr
def find_campaigns(srs, start, end, ignore):
    """Get all campaigns in srs and pull in campaigns in other targeted srs."""
    seen_sr_names = set()
    campaigns = set()
    frontier = set(srs)

    # Transitively expand: each round of campaigns may target subverbifys
    # we haven't looked at yet; keep going until no new names appear.
    while frontier:
        seen_sr_names |= {sr.name for sr in frontier}

        by_date = get_campaigns_by_date(frontier, start, end, ignore)
        found = set(chain.from_iterable(by_date.itervalues()))
        campaigns |= found

        targeted_names = set(chain.from_iterable(
            campaign.target.subverbify_names for campaign in found
        )) - seen_sr_names
        frontier = set(Subverbify._by_name(targeted_names).values())

    return campaigns
def subverbify_facets(self):
    '''Filter out subverbifys that the user isn't allowed to see'''
    # Compute lazily: only build the list once, and only if the search
    # results actually carry a 'verbify' facet.
    if not self._subverbifys and 'verbify' in self._facets:
        counts_by_name = [
            (facet['value'], facet['count'])
            for facet in self._facets['verbify']
        ]

        # batch-resolve names to Subverbify objects; unknown names drop out
        srs_by_name = Subverbify._by_name(
            [name for name, _ in counts_by_name])

        # keep only subverbifys the current user can view
        self._subverbifys = [
            (srs_by_name[name], count)
            for name, count in counts_by_name
            if name in srs_by_name and srs_by_name[name].can_view(c.user)
        ]
    return self._subverbifys
def test_run_srs(*sr_names):
    '''Inject Subverbifys by name into the index'''
    subverbifys = Subverbify._by_name(sr_names).values()
    return SubverbifyUploader(
        g.CLOUDSEARCH_SUBVERBIFY_DOC_API,
        things=subverbifys,
    ).inject()
def get_discovery_srid36s():
    """Get list of srs that help people discover other srs."""
    discovery_srs = Subverbify._by_name(g.live_config['discovery_srs'])
    return [subverbify._id36 for subverbify in discovery_srs.itervalues()]
def get_available_pageviews(targets, start, end, location=None, datestr=False,
                            ignore=None, platform='all'):
    """
    Return the available pageviews by date for the targets and location.

    Available pageviews depends on all equal and higher level locations:
    A location is: subverbify > country > metro

    e.g. if a campaign is targeting /r/funny in USA/Boston we need to check
    that there's enough inventory in:
    * /r/funny (all campaigns targeting /r/funny regardless of location)
    * /r/funny + USA (all campaigns targeting /r/funny and USA with or
      without metro level targeting)
    * /r/funny + USA + Boston (all campaigns targeting /r/funny and USA and
      Boston)
    The available inventory is the smallest of these values.
    """
    # assemble levels of location targeting, None means untargeted
    locations = [None]
    if location:
        locations.append(location)
        if location.metro:
            # metro-level targeting also competes with country-level targeting
            locations.append(Location(country=location.country))

    # get all the campaigns directly and indirectly involved in our target
    # (find_campaigns transitively pulls in campaigns sharing any targeted sr)
    targets, is_single = tup(targets, ret_is_single=True)
    target_srs = list(chain.from_iterable(
        target.subverbifys_slow for target in targets))
    all_campaigns = find_campaigns(target_srs, start, end, ignore)

    # get predicted pageviews for each subverbify and location
    all_sr_names = set(sr.name for sr in target_srs)
    all_sr_names |= set(chain.from_iterable(
        campaign.target.subverbify_names for campaign in all_campaigns
    ))
    all_srs = Subverbify._by_name(all_sr_names).values()
    # location -> {sr_name: predicted pageviews}
    pageviews_dict = {location: get_predicted_pageviews(all_srs, location)
                      for location in locations}

    # determine booked impressions by target and location for each day
    # booked_dict: date -> location -> {sorted sr-name tuple: impressions}
    dates = set(get_date_range(start, end))
    booked_dict = {}
    for date in dates:
        booked_dict[date] = {}
        for location in locations:
            booked_dict[date][location] = defaultdict(int)

    for campaign in all_campaigns:
        camp_dates = set(get_date_range(campaign.start_date,
                                        campaign.end_date))
        sr_names = tuple(sorted(campaign.target.subverbify_names))
        # spread the campaign's impressions evenly across its days
        daily_impressions = campaign.impressions / campaign.ndays

        for location in locations:
            if location and not location.contains(campaign.location):
                # campaign's location is less specific than location
                continue

            for date in camp_dates.intersection(dates):
                booked_dict[date][location][sr_names] += daily_impressions

    # calculate inventory for each target and location on each date
    datekey = lambda dt: dt.strftime('%m/%d/%Y') if datestr else dt

    ret = {}
    for target in targets:
        name = make_target_name(target)
        subverbify_names = target.subverbify_names
        ret[name] = {}
        for date in dates:
            pageviews_by_location = {}
            for location in locations:
                # calculate available impressions for each location
                booked_by_target = booked_dict[date][location]
                pageviews_by_sr_name = pageviews_dict[location]
                pageviews_by_location[location] = get_maximized_pageviews(
                    subverbify_names, booked_by_target, pageviews_by_sr_name)
            # available pageviews is the minimum from all locations
            min_pageviews = min(pageviews_by_location.values())
            if PERCENT_MOBILE != 0:
                # split inventory into mobile/desktop shares when a
                # specific platform was requested
                mobile_pageviews = min_pageviews * (
                    float(PERCENT_MOBILE) / 100)
                if platform == 'mobile':
                    min_pageviews = mobile_pageviews
                if platform == 'desktop':
                    min_pageviews = min_pageviews - mobile_pageviews
            # clamp: overbooking can drive the remainder negative
            ret[name][datekey(date)] = max(0, min_pageviews)

    if is_single:
        # caller passed a single target: unwrap to its date dict
        name = make_target_name(targets[0])
        return ret[name]
    else:
        return ret
def inject_test_data(num_links=25, num_comments=25, num_votes=5):
    """Flood your verbify install with test data based on verbify.com."""

    # make sure the accounts/subverbifys the app config expects exist
    print ">>>> Ensuring configured objects exist"
    system_user = ensure_account(g.system_user)
    ensure_account(g.automoderator_account)
    ensure_subverbify(g.default_sr, system_user)
    ensure_subverbify(g.takedown_sr, system_user)
    ensure_subverbify(g.beta_sr, system_user)
    ensure_subverbify(g.promo_sr_name, system_user)

    print
    print
    print ">>>> Fetching real data from verbify.com"
    # Modeler builds content generators from live site data
    modeler = Modeler()
    subverbifys = [
        modeler.model_subverbify("pics"),
        modeler.model_subverbify("videos"),
        modeler.model_subverbify("askhistorians"),
    ]
    # per-subverbify settings applied after creation
    extra_settings = {
        "pics": {
            "show_media": True,
        },
        "videos": {
            "show_media": True,
        },
    }

    print
    print
    print ">>>> Generating test data"
    print ">>> Accounts"
    # reuse existing accounts (minus the system user), then pad to 50
    # with generated usernames
    account_query = Account._query(sort="_date", limit=500, data=True)
    accounts = [a for a in account_query if a.name != g.system_user]
    accounts.extend(
        ensure_account(modeler.generate_username())
        for i in xrange(50 - len(accounts)))

    print ">>> Content"
    things = []
    for sr_model in subverbifys:
        sr_author = random.choice(accounts)
        sr = ensure_subverbify(sr_model.name, sr_author)

        # make the system user subscribed for easier testing
        if sr.add_subscriber(system_user):
            sr._incr("_ups", 1)

        # apply any custom config we need for this sr
        for setting, value in extra_settings.get(sr.name, {}).iteritems():
            setattr(sr, setting, value)
        sr._commit()

        for i in xrange(num_links):
            link_author = random.choice(accounts)
            url = sr_model.generate_link_url()
            # the modeler signals a self-post with the literal url "self"
            is_self = (url == "self")
            content = sr_model.generate_selfpost_body() if is_self else url
            link = Link._submit(
                is_self=is_self,
                title=sr_model.generate_link_title(),
                content=content,
                author=link_author,
                sr=sr,
                ip="127.0.0.1",
            )
            queries.new_link(link)
            things.append(link)

            # None is a valid parent, so top-level comments stay possible
            comments = [None]
            for i in xrange(fuzz_number(num_comments)):
                comment_author = random.choice(accounts)
                comment, inbox_rel = Comment._new(
                    comment_author,
                    link,
                    parent=random.choice(comments),
                    body=sr_model.generate_comment_body(),
                    ip="127.0.0.1",
                )
                queries.new_comment(comment, inbox_rel)
                comments.append(comment)
                things.append(comment)

    # cast a fuzzed number of random votes on every link and comment
    for thing in things:
        for i in xrange(fuzz_number(num_votes)):
            direction = random.choice([
                Vote.DIRECTIONS.up,
                Vote.DIRECTIONS.unvote,
                Vote.DIRECTIONS.down,
            ])
            voter = random.choice(accounts)
            cast_vote(voter, thing, direction)

    # wait for the queued processing of the above writes to finish
    amqp.worker.join()

    srs = [Subverbify._by_name(n) for n in ("pics", "videos",
                                            "askhistorians")]
    LocalizedDefaultSubverbifys.set_global_srs(srs)
    LocalizedFeaturedSubverbifys.set_global_srs([Subverbify._by_name('pics')])