Code example #1
def run(verbose=True, sleep_time = 60, num_items = 1):
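    # Walk Link ids downward from a cached cursor, enqueueing each batch onto
    # the indextank_changes queue and saving the new cursor after every pass.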
    key = "indextank_cursor"
    cursor = g.cache.get(key)
    if cursor is None:
        raise ValueError("%s is not set!" % key)
    cursor = int(cursor)

    while True:
        if verbose:
            print "Looking for %d items with _id < %d" % (num_items, cursor)
        q = Link._query(sort = desc('_id'),
                        limit = num_items)
        q._after(Link._byID(cursor))
        last_date = None
        for item in q:
            cursor = item._id
            last_date = item._date
            amqp.add_item('indextank_changes', item._fullname,
                      message_id = item._fullname,
                      delivery_mode = amqp.DELIVERY_TRANSIENT)
        g.cache.set(key, cursor)

        if verbose:
            if last_date:
                last_date = last_date.strftime("%Y-%m-%d")
            print ("Just enqueued %d items. New cursor=%s (%s). Sleeping %d seconds."
                   % (num_items, cursor, last_date, sleep_time))

        sleep(sleep_time)
Code example #2
File: scraper.py Project: kevinrose/diggit
def submit_all():
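    # Resubmit each test URL to the testmedia subreddit, deleting any existing
    # copy first so media scraping runs fresh.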
    from r2.models import Subdigg, Account, Link, NotFound
    from r2.lib.media import set_media
    from r2.lib.db import queries
    sr = Subdigg._by_name('testmedia')
    author = Account._by_name('testmedia')
    links = []
    for url in test_urls:
        try:
            # delete any existing version of the link
            l = Link._by_url(url, sr)
            print "Deleting %s" % l
            l._deleted = True
            l._commit()
        except NotFound:
            pass

        l = Link._submit(url, url, author, sr, '0.0.0.0')

        try:
            set_media(l)
        except Exception, e:
            print e

        if g.write_query_queue:
            queries.new_link(l)

        links.append(l)
Code example #3
File: queries.py Project: codyro/reddit
def get_links(sr, sort, time, merge_batched=True):
    """General link query for a subreddit."""
    q = Link._query(Link.c.sr_id == sr._id,
                    sort = db_sort(sort),
                    data = True)

    if time != 'all':
        q._filter(db_times[time])

    res = make_results(q)

    # see the discussion above batched_time_times
    if (merge_batched
        and g.use_query_cache
        and sort in batched_time_sorts
        and time in batched_time_times):

        byday = Link._query(Link.c.sr_id == sr._id,
                            sort = db_sort(sort), data=True)
        byday._filter(db_times['day'])

        res = merge_results(res,
                            make_results(byday))

    return res
Code example #4
File: promotecontroller.py Project: 6r3nt/reddit
    def GET_report(self, start, end, link_text=None, owner=None):
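        # Gather links from the owner's campaigns and/or an explicit id36
        # list, then render the report as HTML or CSV.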
        now = datetime.now(g.tz).replace(hour=0, minute=0, second=0,
                                         microsecond=0)
        end = end or now - timedelta(days=1)
        start = start or end - timedelta(days=7)

        links = []
        bad_links = []
        owner_name = owner.name if owner else ''

        if owner:
            promo_weights = PromotionWeights.get_campaigns(start, end,
                                                           author_id=owner._id)
            campaign_ids = [pw.promo_idx for pw in promo_weights]
            campaigns = PromoCampaign._byID(campaign_ids, data=True)
            link_ids = {camp.link_id for camp in campaigns.itervalues()}
            links.extend(Link._byID(link_ids, data=True, return_dict=False))

        if link_text is not None:
            id36s = link_text.replace(',', ' ').split()
            try:
                links_from_text = Link._byID36(id36s, data=True)
            except NotFound:
                links_from_text = {}

            bad_links = [id36 for id36 in id36s if id36 not in links_from_text]
            links.extend(links_from_text.values())

        content = PromoteReport(links, link_text, owner_name, bad_links, start,
                                end)
        if c.render_style == 'csv':
            return content.as_csv()
        else:
            return PromotePage(title=_("sponsored link report"),
                               content=content).render()
Code example #5
File: queue.py Project: 13steinj/reddit-plugin-dfp
    def _handle_check_edits(payload):
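        # Rebuild the link from its DFP creative and clear the edit-check flag.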
        existing = Link._by_fullname(payload["link"], data=True)
        creative = creatives_service.get_creative(existing)

        link = utils.dfp_creative_to_link(
            creative, link=Link._by_fullname(payload["link"], data=True))

        link.dfp_checking_edits = False
        link._commit()
Code example #6
File: test_validator.py Project: heqzha/reddit
    def _mock_link(id=1, author_id=1, sr_id=1, **kwargs):
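        # Build an in-memory Link and stub VByName.run so validators under
        # test resolve to it without touching the database.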
        kwargs['id'] = id
        kwargs['author_id'] = author_id
        kwargs['sr_id'] = sr_id

        link = Link(**kwargs)
        VByName.run = MagicMock(return_value=link)

        sr = Subreddit(id=sr_id)
        link.subreddit = sr

        return link
Code example #7
 def _handle_adzerk(msg):
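     # Dispatch each adzerk queue message to its deactivate/update handler.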
     data = json.loads(msg.body)
     g.log.debug('data: %s' % data)
     action = data.get('action')
     if action == 'deactivate_link':
         link = Link._by_fullname(data['link'], data=True)
         _deactivate_link(link)
     elif action == 'deactivate_campaign':
         link = Link._by_fullname(data['link'], data=True)
         campaign = PromoCampaign._by_fullname(data['campaign'], data=True)
         _deactivate_campaign(link, campaign)
     elif action == 'update_adzerk':
         link = Link._by_fullname(data['link'], data=True)
         campaign = PromoCampaign._by_fullname(data['campaign'], data=True)
         _update_adzerk(link, campaign)
Code example #8
File: recommender.py Project: Shilohtd/reddit
def get_comment_items(srs, src, count=4):
    """Get hot links from srs, plus top comment from each link."""
    link_fullnames = normalized_hot([sr._id for sr in srs])
    hot_links = Link._by_fullname(link_fullnames[:count], return_dict=False)
    top_comments = []
    for link in hot_links:
        builder = CommentBuilder(
            link, operators.desc("_confidence"), comment=None, context=None, num=1, load_more=False
        )
        listing = NestedListing(builder, parent_name=link._fullname).listing()
        top_comments.extend(listing.things)
    srs = Subreddit._byID([com.sr_id for com in top_comments])
    links = Link._byID([com.link_id for com in top_comments])
    comment_items = [ExploreItem(TYPE_COMMENT, src, srs[com.sr_id], links[com.link_id], com) for com in top_comments]
    return comment_items
Code example #9
File: test_validator.py Project: Liwink/reddit
    def _mock_link(id=1, author_id=1, sr_id=1, can_comment=True, can_view_promo=True, **kwargs):
        kwargs["id"] = id
        kwargs["author_id"] = author_id
        kwargs["sr_id"] = sr_id

        link = Link(**kwargs)
        VByName.run = MagicMock(return_value=link)

        sr = Subreddit(id=sr_id)
        link.subreddit_slow = sr

        Subreddit.can_comment = MagicMock(return_value=can_comment)
        Link.can_view_promo = MagicMock(return_value=can_view_promo)

        return link
Code example #10
File: cassamodels.py Project: Anenome/reddit
def test_cassasavehide():
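    # Round-trip a CassandraSave: create it, verify both lookup paths, then
    # destroy it and verify it is gone.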
    from r2.models import Account, Link, CassandraSave, SavesByAccount
    from r2.lib.db import tdb_cassandra

    a = list(Account._query(sort=desc('_date'),
                            limit=1))[0]
    l = list(Link._query(sort=desc('_date'),
                         limit=1))[0]

    try:
        csh = CassandraSave._fast_query(a._id36, l._id36)
        print "Warning! Deleting!", csh
        CassandraSave._fast_query(a._id36, l._id36)._destroy()
    except tdb_cassandra.NotFound:
        pass

    csh = CassandraSave._save(a, l)
    csh._commit()
    assert CassandraSave._fast_query(a._id36, l._id36) == csh

    # check for the SavesByAccount object too
    assert SavesByAccount._byID(a._id36)[csh._id] == csh._id

    csh._destroy()

    try:
        CassandraSave._fast_query(a._id36, l._id36)  # should raise NotFound
        raise Exception("shouldn't exist after destroying")
    except tdb_cassandra.NotFound:
        pass

    try:
        assert csh._id not in SavesByAccount._byID(a._id36, properties = csh._id)._values()
    except tdb_cassandra.NotFound:
        pass
Code example #11
File: queries.py Project: rram/reddit
def get_spam_filtered_links(sr_id):
    """ NOTE: This query will never run unless someone does an "update" on it,
        but that will probably timeout. Use insert_spam_filtered_links."""
    return Link._query(Link.c.sr_id == sr_id,
                       Link.c._spam == True,
                       Link.c.verdict != 'mod-removed',
                       sort = db_sort('new'))
Code example #12
File: utils.py Project: HerculesCE/reddit
def url_links_builder(url, exclude=None, num=None, after=None, reverse=None,
                      count=None):
    from r2.lib.template_helpers import add_sr
    from r2.models import IDBuilder, Link, NotFound
    from operator import attrgetter

    if url.startswith('/'):
        url = add_sr(url, force_hostname=True)

    try:
        links = Link._by_url(url, None)
    except NotFound:
        links = []

    links = [ link for link in links
                   if link._fullname != exclude ]
    links.sort(key=attrgetter('num_comments'), reverse=True)

    # don't show removed links in duplicates unless admin or mod
    # or unless it's your own post
    def include_link(link):
        return (not link._spam or
                (c.user_is_loggedin and
                    (link.author_id == c.user._id or
                        c.user_is_admin or
                        link.subreddit.is_moderator(c.user))))

    builder = IDBuilder([link._fullname for link in links], skip=True,
                        keep_fn=include_link, num=num, after=after,
                        reverse=reverse, count=count)

    return builder
Code example #13
File: bidding.py Project: Jeerok/reddit
    def bid_history(cls, start_date, end_date = None, account_id = None):
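        # Tally daily bid and refund totals for promoted links in the window.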
        from r2.models import Link
        from r2.lib import promote
        start_date = to_date(start_date)
        end_date   = to_date(end_date)
        q = cls.query()
        q = q.filter(and_(cls.date >= start_date, cls.date < end_date))
        q = list(q)

        links = Link._by_fullname([x.thing_name for x in q], data=True)

        d = start_date
        res = []
        while d < end_date:
            bid = 0
            refund = 0
            for i in q:
                if d == i.date:
                    l = links[i.thing_name]
                    if (not promote.is_rejected(l) and 
                        not promote.is_unpaid(l) and 
                        not l._deleted and 
                        i.promo_idx in getattr(l, 'campaigns', {})):
                        
                        camp = l.campaigns[i.promo_idx]
                        bid += i.bid
                        refund += i.bid if camp[-1] <= 0 else 0
            res.append([d, bid, refund])
            d += datetime.timedelta(1)
        return res
Code example #14
File: normalized_hot.py Project: cmak/reddit
def get_hot(sr):
    q = Link._query(Link.c.sr_id == sr._id,
                    sort = desc('_hot'),
                    write_cache = True,
                    limit = 150)

    iden = q._iden()

    read_cache = True
    #if query is in the cache, the expire flag is true, and the access
    #time is old, set read_cache = False
    if cache.get(iden) is not None:
        if cache.get(expire_key(sr)):
            access_time = cache.get(access_key(sr))
            if not access_time or datetime.now() > access_time + expire_delta:
                cache.delete(expire_key(sr))
                read_cache = False
    #if the query isn't in the cache, set read_cache to false so we
    #record the access time
    else:
        read_cache = False

    if not read_cache:
        cache.set(access_key(sr), datetime.now())
    
    q._read_cache = read_cache
    res = list(q)
    
    #set the #1 link so we can ignore it later. expire after TOP_CACHE
    #just in case something happens and that sr doesn't update
    if res:
        cache.set(top_key(sr), res[0]._fullname, TOP_CACHE)

    return res
Code example #15
File: promote.py Project: karthikv/reddit
def get_promos(date, sr_names=None, link=None):
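    # Yield (campaign, link) pairs for promotions scheduled on the given date.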
    campaign_ids = PromotionWeights.get_campaign_ids(date, sr_names=sr_names, link=link)
    campaigns = PromoCampaign._byID(campaign_ids, data=True, return_dict=False)
    link_ids = {camp.link_id for camp in campaigns}
    links = Link._byID(link_ids, data=True)
    for camp in campaigns:
        yield camp, links[camp.link_id]
Code example #16
File: promote.py Project: j2p2/reddit
def new_promotion(title, url, selftext, user, ip):
    """
    Creates a new promotion with the provided title, etc, and sets its
    status to be 'unpaid'.
    """
    sr = Subreddit._byID(get_promote_srid())
    l = Link._submit(title, url, user, sr, ip)
    l.promoted = True
    l.disable_comments = False
    PromotionLog.add(l, "promotion created")

    if url == "self":
        l.url = l.make_permalink_slow()
        l.is_self = True
        l.selftext = selftext

    l._commit()

    update_promote_status(l, PROMOTE_STATUS.unpaid)

    # the user has posted a promotion, so enable the promote menu unless
    # they have already opted out
    if user.pref_show_promote is not False:
        user.pref_show_promote = True
        user._commit()

    # notify of new promo
    emailer.new_promo(l)
    return l
Code example #17
File: cloudsearch.py Project: jzplusplus/reddit
def test_run_link(start_link, count=1000):
    """Inject `count` number of links, starting with `start_link`"""
    if isinstance(start_link, basestring):
        start_link = int(start_link, 36)
    links = Link._byID(range(start_link - count, start_link), data=True, return_dict=False)
    uploader = LinkUploader(g.CLOUDSEARCH_DOC_API, things=links)
    return uploader.inject()
Code example #18
File: utils.py Project: 99plus2/reddit
def url_links_builder(url, exclude=None):
    from r2.models import IDBuilder, Link, NotFound
    from operator import attrgetter

    try:
        links = tup(Link._by_url(url, None))
    except NotFound:
        links = []

    links = [ link for link in links
                   if link._fullname != exclude ]
    links.sort(key=attrgetter('num_comments'), reverse=True)

    # don't show removed links in duplicates unless admin or mod
    # or unless it's your own post
    def include_link(link):
        return (not link._spam or
                (c.user_is_loggedin and
                    (link.author_id == c.user._id or
                        c.user_is_admin or
                        link.subreddit.is_moderator(c.user))))

    builder = IDBuilder([link._fullname for link in links],
                        skip=True, keep_fn=include_link)

    return builder
Code example #19
    def _handle_adzerk(msg):
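        # Orphaned flights are deactivated immediately; other actions resolve
        # the link (and campaign, if any) before dispatch.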
        data = json.loads(msg.body)
        g.log.debug('data: %s' % data)

        action = data.get('action')

        if action == 'deactivate_orphaned_flight':
            _deactivate_orphaned_flight(data['flight'])
            return

        link = Link._by_fullname(data['link'], data=True)
        if data['campaign']:
            campaign = PromoCampaign._by_fullname(data['campaign'], data=True)
        else:
            campaign = None

        if action == 'update_adzerk':
            if 'triggered_by' in data and data['triggered_by'] is not None:
                triggered_by = Account._by_fullname(data['triggered_by'], data=True)
            else:
                triggered_by = None

            _update_adzerk(link, campaign, triggered_by)

        elif action == 'deactivate_overdelivered':
            _deactivate_overdelivered(link, campaign)
Code example #20
File: utils.py Project: 13steinj/reddit-plugin-dfp
def dfp_creative_to_link(creative, link=None):
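    # Create or update a promoted Link from a DFP creative's template values.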
    from r2.models import (
        Link,
        PROMOTE_STATUS,
    )

    user = get_dfp_user()
    sr = get_dfp_subreddit()
    attributes = dfp_template_to_dict(
        creative.creativeTemplateVariableValues)

    kind = "self" if attributes["selftext"] else "link"
    url = attributes["url"] if kind == "link" else "self"

    if not link:
        link = Link._submit(
            attributes["title"], url, user, sr,
            ip="127.0.0.1", sendreplies=False,
        )

    if kind == "self":
        link.url = link.make_permalink_slow()
        link.is_self = True
        link.selftext = attributes["selftext"]

    link.promoted = True
    link.promote_status = PROMOTE_STATUS.promoted
    link.thumbnail_url = attributes["thumbnail_url"]
    link.mobile_ad_url = attributes["mobile_ad_url"]
    link.third_party_tracking = attributes["third_party_tracking"]
    link.third_party_tracking_2 = attributes["third_party_tracking_2"]
    link.dfp_creative_id = creative["id"]

    link._commit()
    return link
Code example #21
File: queries.py Project: denrobapps/Reddit-VM
def new_comment(comment, inbox_rels):
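    # Refresh the author's comment listings, enqueue the new comment, and fan
    # out inbox notifications.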
    author = Account._byID(comment.author_id)
    job = [get_comments(author, "new", "all")]
    if comment._deleted:
        job.append(get_all_comments())
        add_queries(job, delete_items=comment)
    else:
        # if comment._spam:
        #    sr = Subreddit._byID(comment.sr_id)
        #    job.append(get_spam_comments(sr))
        add_queries(job, insert_items=comment)
        amqp.add_item("new_comment", comment._fullname)
        if not g.amqp_host:
            l = Link._byID(comment.link_id, data=True)
            add_comment_tree(comment, l)

    # note that get_all_comments() is updated by the amqp process
    # r2.lib.db.queries.run_new_comments

    if inbox_rels:
        for inbox_rel in tup(inbox_rels):
            inbox_owner = inbox_rel._thing1
            if inbox_rel._name == "inbox":
                add_queries([get_inbox_comments(inbox_owner)], insert_items=inbox_rel)
            else:
                add_queries([get_inbox_selfreply(inbox_owner)], insert_items=inbox_rel)
            set_unread(comment, inbox_owner, True)
Code example #22
File: promotecontroller.py Project: Damgaard/reddit
 def query(self):
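     # Sponsors get site-wide promo listings per sort; everyone else sees
     # only their own links.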
     if c.user_is_sponsor:
         if self.sort == "future_promos":
             return queries.get_all_unapproved_links()
         elif self.sort == "pending_promos":
             return queries.get_all_accepted_links()
         elif self.sort == "unpaid_promos":
             return queries.get_all_unpaid_links()
         elif self.sort == "rejected_promos":
             return queries.get_all_rejected_links()
         elif self.sort == "live_promos" and self.sr:
             return self.live_by_subreddit(self.sr)
         elif self.sort == 'live_promos':
             return queries.get_all_live_links()
         elif self.sort == 'underdelivered':
             q = queries.get_underdelivered_campaigns()
             campaigns = PromoCampaign._by_fullname(list(q), data=True,
                                                    return_dict=False)
             link_ids = [camp.link_id for camp in campaigns]
             return [Link._fullname_from_id36(to36(id)) for id in link_ids]
         elif self.sort == 'reported':
             return queries.get_reported_links(get_promote_srid())
         return queries.get_all_promoted_links()
     else:
         if self.sort == "future_promos":
             return queries.get_unapproved_links(c.user._id)
         elif self.sort == "pending_promos":
             return queries.get_accepted_links(c.user._id)
         elif self.sort == "unpaid_promos":
             return queries.get_unpaid_links(c.user._id)
         elif self.sort == "rejected_promos":
             return queries.get_rejected_links(c.user._id)
         elif self.sort == "live_promos":
             return queries.get_live_links(c.user._id)
         return queries.get_promoted_links(c.user._id)
Code example #23
def _create_link(creative):

    """
    Creates a link to allow third party voting/commenting
    """

    user = _get_user()
    sr = _get_subreddit()
    attributes = _template_to_dict(creative)

    kind = "self" if attributes["selftext"] else "link"
    url = attributes["url"] if kind == "link" else "self"
    link = Link._submit(
        attributes["title"], url, user, sr,
        ip="127.0.0.1", sendreplies=False,
    )

    if kind == "self":
        link.url = link.make_permalink_slow()
        link.is_self = True
        link.selftext = attributes["selftext"]

    link.promoted = True
    link.third_party_promo = True
    link.thumbnail_url = attributes["thumbnail_url"]
    link.mobile_ad_url = attributes["mobile_ad_url"]
    link.third_party_tracking = attributes["third_party_tracking"]
    link.third_party_tracking_2 = attributes["third_party_tracking_2"]
    link.external_id = creative["id"]

    link._commit()
    return link
Code example #24
File: subreddit.py Project: nborwankar/reddit
    def get_links(self, sort, time):
        from r2.lib.db import queries
        from r2.models import Link
        from r2.controllers.errors import UserRequiredException

        if not c.user_is_loggedin:
            raise UserRequiredException

        friends = self.get_important_friends(c.user._id)

        if not friends:
            return []

        if g.use_query_cache:
            # with the precomputer enabled, this Subreddit only supports
            # being sorted by 'new'. it would be nice to have a
            # cleaner UI than just blatantly ignoring their sort,
            # though
            sort = "new"
            time = "all"

            friends = Account._byID(friends, return_dict=False)

            crs = [queries.get_submitted(friend, sort, time) for friend in friends]
            return queries.MergedCachedResults(crs)

        else:
            q = Link._query(Link.c.author_id == friends, sort=queries.db_sort(sort), data=True)
            if time != "all":
                q._filter(queries.db_times[time])
            return q
Code example #25
    def get_links(cls, event_id):
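        # Fetch the event's related links, then wrap and filter them so only
        # publicly visible posts remain.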
        link_ids = cls._get_related_link_ids(event_id)
        links = Link._byID(link_ids, data=True, return_dict=False)
        links.sort(key=lambda L: L.num_comments, reverse=True)

        sr_ids = set(L.sr_id for L in links)
        subreddits = Subreddit._byID(sr_ids, data=True)

        wrapped = []
        for link in links:
            w = Wrapped(link)

            if w._spam or w._deleted:
                continue

            if not getattr(w, "allow_liveupdate", True):
                continue

            w.subreddit = subreddits[link.sr_id]

            # ideally we'd check if the user can see the subreddit, but by
            # doing this we keep everything user unspecific which makes caching
            # easier.
            if w.subreddit.type == "private":
                continue

            comment_label = ungettext("comment", "comments", link.num_comments)
            w.comments_label = strings.number_label % dict(
                num=link.num_comments, thing=comment_label)

            wrapped.append(w)
        return wrapped
Code example #26
File: subreddit.py Project: rajbot/tikical
 def get_links(self, sort, time):
     from r2.models import Link
     from r2.lib.db import queries
     q = Link._query(sort = queries.db_sort(sort))
     if time != 'all':
         q._filter(queries.db_times[time])
     return q
Code example #27
File: promote.py Project: pra85/reddit
def new_promotion(is_self, title, content, author, ip):
    """
    Creates a new promotion with the provided title, etc, and sets its
    status to be 'unpaid'.
    """
    sr = Subreddit._byID(Subreddit.get_promote_srid())
    l = Link._submit(
        is_self=is_self,
        title=title,
        content=content,
        author=author,
        sr=sr,
        ip=ip,
    )

    l.promoted = True
    l.disable_comments = False
    l.sendreplies = True
    PromotionLog.add(l, 'promotion created')

    update_promote_status(l, PROMOTE_STATUS.unpaid)

    # the user has posted a promotion, so enable the promote menu unless
    # they have already opted out
    if author.pref_show_promote is not False:
        author.pref_show_promote = True
        author._commit()

    # notify of new promo
    emailer.new_promo(l)
    return l
Code example #28
File: trafficpages.py Project: zeantsoi/reddit
def _use_adserver_reporting(thing):
    if not feature.is_enabled("adserver_reporting"):
        return False

    if not g.adserver_reporting_cutoff:
        return False

    try:
        cutoff = parse_date(g.adserver_reporting_cutoff)
    except ValueError:
        return False

    if isinstance(thing, PromoCampaign):
        link = Link._byID(thing.link_id)
    else:
        link = thing

    campaigns = list(PromoCampaign._by_link(link._id))

    # No campaigns, so nothing to report. Show the new
    # view anyway.
    if not campaigns:
        return True

    end_date = max(campaign.end_date for campaign in campaigns)
    end_date = end_date.replace(tzinfo=g.tz)
    cutoff = cutoff.replace(tzinfo=g.tz)

    if end_date < cutoff:
        return False

    return not feature.is_enabled("legacy_ad_reporting")
Code example #29
File: reddit_base.py Project: ChrisCinelli/reddit
def set_recent_clicks():
    c.recent_clicks = []
    if not c.user_is_loggedin:
        return

    click_cookie = read_user_cookie('recentclicks2')
    if click_cookie:
        if valid_click_cookie(click_cookie):
            names = [ x for x in UniqueIterator(click_cookie.split(',')) if x ]

            if len(names) > 5:
                names = names[:5]
                set_user_cookie('recentclicks2', ','.join(names))
            #eventually this will look at the user preference
            names = names[:5]

            try:
                c.recent_clicks = Link._by_fullname(names, data=True,
                                                    return_dict=False)
            except NotFound:
                # clear their cookie because it's got bad links in it
                set_user_cookie('recentclicks2', '')
        else:
            #if the cookie wasn't valid, clear it
            set_user_cookie('recentclicks2', '')
Code example #30
File: promote.py Project: AD42/reddit
def new_promotion(title, url, selftext, user, ip):
    """
    Creates a new promotion with the provided title, etc, and sets its
    status to be 'unpaid'.
    """
    sr = Subreddit._byID(get_promote_srid())
    l = Link._submit(title, url, user, sr, ip)
    l.promoted = True
    l.disable_comments = False
    PromotionLog.add(l, 'promotion created')

    if url == 'self':
        l.url = l.make_permalink_slow()
        l.is_self = True
        l.selftext = selftext

    l._commit()

    # set the status of the link, populating the query queue
    if c.user_is_sponsor or user.trusted_sponsor:
        set_promote_status(l, PROMOTE_STATUS.accepted)
    else:
        set_promote_status(l, PROMOTE_STATUS.unpaid)

    # the user has posted a promotion, so enable the promote menu unless
    # they have already opted out
    if user.pref_show_promote is not False:
        user.pref_show_promote = True
        user._commit()

    # notify of new promo
    emailer.new_promo(l)
    return l
Code example #31
 def get_house_link_names(cls):
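     # Collect fullnames of links backing currently scheduled house-priority
     # campaigns.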
     now = promote.promo_datetime_now()
     pws = PromotionWeights.get_campaigns(now)
     campaign_ids = {pw.promo_idx for pw in pws}
     q = PromoCampaign._query(PromoCampaign.c._id.in_(campaign_ids),
                              PromoCampaign.c.priority_name == 'house',
                              data=True)
     link_names = {
         Link._fullname_from_id36(to36(camp.link_id))
         for camp in q
     }
     return sorted(link_names, reverse=True)
Code example #32
def finalize_completed_campaigns(daysago=1):
    # PromoCampaign.end_date is utc datetime with year, month, day only
    now = datetime.datetime.now(g.tz)
    date = now - datetime.timedelta(days=daysago)
    date = date.replace(hour=0, minute=0, second=0, microsecond=0)

    q = PromoCampaign._query(
        PromoCampaign.c.end_date == date,
        # exclude no transaction
        PromoCampaign.c.trans_id != NO_TRANSACTION,
        data=True)
    # filter out freebies
    campaigns = filter(lambda camp: camp.trans_id > NO_TRANSACTION, q)

    if not campaigns:
        return

    # check that traffic is up to date
    earliest_campaign = min(campaigns, key=lambda camp: camp.start_date)
    start, end = get_total_run(earliest_campaign)
    missing_traffic = traffic.get_missing_traffic(start.replace(tzinfo=None),
                                                  date.replace(tzinfo=None))
    if missing_traffic:
        raise ValueError("Can't finalize campaigns finished on %s."
                         "Missing traffic from %s" % (date, missing_traffic))

    links = Link._byID([camp.link_id for camp in campaigns], data=True)
    underdelivered_campaigns = []

    for camp in campaigns:
        if hasattr(camp, 'refund_amount'):
            continue

        link = links[camp.link_id]
        billable_impressions = get_billable_impressions(camp)
        billable_amount = get_billable_amount(camp, billable_impressions)

        if billable_amount >= camp.total_budget_pennies:
            if hasattr(camp, 'cpm'):
                text = '%s completed with $%s billable (%s impressions @ $%s).'
                text %= (camp, billable_amount, billable_impressions,
                         camp.bid_dollars)
            else:
                text = '%s completed with $%s billable (pre-CPM).'
                text %= (camp, billable_amount)
            PromotionLog.add(link, text)
            camp.refund_amount = 0.
            camp._commit()
        elif charged_or_not_needed(camp):
            underdelivered_campaigns.append(camp)

    if underdelivered_campaigns:
        queries.set_underdelivered_campaigns(underdelivered_campaigns)
Code example #33
File: recommender.py Project: z0r0/saidit
def get_rising_items(omit_sr_ids, count=4):
    """Get links that are rising right now."""
    all_rising = rising.get_all_rising()
    candidate_sr_ids = {sr_id for link, score, sr_id in all_rising}.difference(omit_sr_ids)
    link_fullnames = [link for link, score, sr_id in all_rising if sr_id in candidate_sr_ids]
    link_fullnames_to_show = random_sample(link_fullnames, count)
    rising_links = Link._by_fullname(link_fullnames_to_show,
                                     return_dict=False,
                                     data=True)
    rising_items = [ExploreItem(TYPE_RISING, 'ris', Subreddit._byID(l.sr_id), l)
                   for l in rising_links]
    return rising_items
Code example #34
def get_comment_items(srs, src, count=4):
    """Get hot links from srs, plus top comment from each link."""
    link_fullnames = normalized_hot([sr._id for sr in srs])
    hot_links = Link._by_fullname(link_fullnames[:count], return_dict=False)
    top_comments = []
    for link in hot_links:
        builder = CommentBuilder(link,
                                 operators.desc('_confidence'),
                                 comment=None,
                                 context=None,
                                 num=1,
                                 load_more=False)
        listing = NestedListing(builder, parent_name=link._fullname).listing()
        top_comments.extend(listing.things)
    srs = Subreddit._byID([com.sr_id for com in top_comments])
    links = Link._byID([com.link_id for com in top_comments])
    comment_items = [
        ExploreItem(TYPE_COMMENT, src, srs[com.sr_id], links[com.link_id], com)
        for com in top_comments
    ]
    return comment_items
Code example #35
File: voting.py Project: wizzwizz4/saidit
    def process_message(msg):
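        # Validate the queued vote, create and commit it under a per-user/link
        # lock, then refresh the affected cached queries.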
        vote_data = json.loads(msg.body)
        hook = hooks.get_hook('vote.validate_vote_data')
        if hook.call_until_return(msg=msg, vote_data=vote_data) is False:
            # Corrupt records in the queue. Ignore them.
            print "Ignoring invalid vote by %s on %s %s" % (
                vote_data.get('user_id', '<unknown>'),
                vote_data.get('thing_fullname', '<unknown>'), vote_data)
            return

        timer = g.stats.get_timer("link_vote_processor")
        timer.start()

        user = Account._byID(vote_data.pop("user_id"))
        link = Link._by_fullname(vote_data.pop("thing_fullname"))

        # create the vote and update the voter's liked/disliked under lock so
        # that the vote state and cached query are consistent
        lock_key = "vote-%s-%s" % (user._id36, link._fullname)
        with g.make_lock("voting", lock_key, timeout=5):
            print "Processing vote by %s on %s %s" % (user, link, vote_data)

            try:
                vote = Vote(
                    user,
                    link,
                    direction=vote_data["direction"],
                    date=datetime.utcfromtimestamp(vote_data["date"]),
                    data=vote_data["data"],
                    event_data=vote_data.get("event_data"),
                    # CUSTOM: voting model
                    vote_direction=vote_data["vote_direction"],
                )
            except TypeError as e:
                # a vote on an invalid type got in the queue, just skip it
                g.log.exception("Invalid type: %r", e.message)
                return

            vote.commit()
            timer.intermediate("create_vote_object")

            update_user_liked(vote)
            timer.intermediate("voter_likes")

        vote_valid = vote.is_automatic_initial_vote or vote.effects.affects_score
        link_valid = not (link._spam or link._deleted)
        if vote_valid and link_valid:
            add_to_author_query_q(link)
            add_to_subreddit_query_q(link)
            add_to_domain_query_q(link)

        timer.stop()
        timer.flush()
Code example #36
File: media_test.py Project: yangman-c/reddit
    def test_image_link(self):
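        # Scraping should prefer an image URL in the selftext and fall back
        # to the first link when no URL looks like an image.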
        post = Link(is_self=True, selftext='''
Some text here.
https://example.com
https://reddit.com/a.jpg''')
        url = _get_scrape_url(post)
        self.assertEqual(url, 'https://reddit.com/a.jpg')

        post = Link(is_self=True, selftext='''
Some text here.
https://example.com
https://reddit.com/a.PNG''')
        url = _get_scrape_url(post)
        self.assertEqual(url, 'https://reddit.com/a.PNG')

        post = Link(is_self=True, selftext='''
Some text here.
https://example.com
https://reddit.com/a.jpg/b''')
        url = _get_scrape_url(post)
        self.assertEqual(url, 'https://example.com')
Code example #37
 def get_links(self, sort, time):
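     # Site-wide time-filtered link listing, cached for 60 seconds.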
     from r2.lib import promote
     from r2.models import Link
     from r2.lib.db import queries
     q = Link._query(sort=queries.db_sort(sort),
                     read_cache=True,
                     write_cache=True,
                     cache_time=60,
                     data=True)
     if time != 'all':
         q._filter(queries.db_times[time])
     return q
Code example #38
File: queries.py Project: tjr1351/reddit
def _get_links(sr_id, sort, time):
    """General link query for a subreddit."""
    q = Link._query(Link.c.sr_id == sr_id,
                    sort = db_sort(sort),
                    data = True)

    if time != 'all':
        q._filter(db_times[time])

    res = make_results(q)

    return res
Code example #39
 def query(self):
     if c.user_is_sponsor:
         if self.sort == "future_promos":
             return queries.get_all_unapproved_links()
         elif self.sort == "pending_promos":
             return queries.get_all_accepted_links()
         elif self.sort == "unpaid_promos":
             return queries.get_all_unpaid_links()
         elif self.sort == "rejected_promos":
             return queries.get_all_rejected_links()
         elif self.sort == "live_promos" and self.sr:
             return self.live_by_subreddit(self.sr)
         elif self.sort == 'live_promos':
             return queries.get_all_live_links()
         elif self.sort == 'underdelivered':
             q = queries.get_underdelivered_campaigns()
             campaigns = PromoCampaign._by_fullname(list(q),
                                                    data=True,
                                                    return_dict=False)
             link_ids = [camp.link_id for camp in campaigns]
             return [Link._fullname_from_id36(to36(id)) for id in link_ids]
         elif self.sort == 'reported':
             return queries.get_reported_links(get_promote_srid())
         elif self.sort == 'house':
             campaigns = self.get_house_campaigns()
             link_ids = {camp.link_id for camp in campaigns}
             return [Link._fullname_from_id36(to36(id)) for id in link_ids]
         return queries.get_all_promoted_links()
     else:
         if self.sort == "future_promos":
             return queries.get_unapproved_links(c.user._id)
         elif self.sort == "pending_promos":
             return queries.get_accepted_links(c.user._id)
         elif self.sort == "unpaid_promos":
             return queries.get_unpaid_links(c.user._id)
         elif self.sort == "rejected_promos":
             return queries.get_rejected_links(c.user._id)
         elif self.sort == "live_promos":
             return queries.get_live_links(c.user._id)
         return queries.get_promoted_links(c.user._id)
Code example #40
File: promotecontroller.py Project: sattti161/reddit
    def GET_report(self, start, end, link_text=None, owner=None):
        now = datetime.now(g.tz).replace(hour=0, minute=0, second=0,
                                         microsecond=0)
        if not start or not end:
            start = promote.promo_datetime_now(offset=1).date()
            end = promote.promo_datetime_now(offset=8).date()
            c.errors.remove((errors.BAD_DATE, 'startdate'))
            c.errors.remove((errors.BAD_DATE, 'enddate'))
        end = end or now - timedelta(days=1)
        start = start or end - timedelta(days=7)

        links = []
        bad_links = []
        owner_name = owner.name if owner else ''

        if owner:
            promo_weights = PromotionWeights.get_campaigns(start, end,
                                                           author_id=owner._id)
            campaign_ids = [pw.promo_idx for pw in promo_weights]
            campaigns = PromoCampaign._byID(campaign_ids, data=True)
            link_ids = {camp.link_id for camp in campaigns.itervalues()}
            links.extend(Link._byID(link_ids, data=True, return_dict=False))

        if link_text is not None:
            id36s = link_text.replace(',', ' ').split()
            try:
                links_from_text = Link._byID36(id36s, data=True)
            except NotFound:
                links_from_text = {}

            bad_links = [id36 for id36 in id36s if id36 not in links_from_text]
            links.extend(links_from_text.values())

        content = PromoteReport(links, link_text, owner_name, bad_links, start,
                                end)
        if c.render_style == 'csv':
            return content.as_csv()
        else:
            return PromotePage(title=_("sponsored link report"),
                               content=content).render()
Code example #41
File: promote.py Project: johngoodleaf/reddit
 def _run(msgs, chan):
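     # Drain queued promo-update messages, rebuild the day's promotions, and
     # log completion against each affected link.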
     items = [json.loads(msg.body) for msg in msgs]
     if QUEUE_ALL in items:
         # QUEUE_ALL is just an indicator to run make_daily_promotions.
         # There's no promotion log to update in this case.
         print "Received %s QUEUE_ALL message(s)" % items.count(QUEUE_ALL)
         items = [i for i in items if i != QUEUE_ALL]
     make_daily_promotions()
     links = Link._by_fullname([i["link"] for i in items])
     for item in items:
         PromotionLog.add(links[item['link']],
                          "Finished remaking current promotions (this link "
                          "was: %(message)s" % item)
Code example #42
def link_from_url(path, filter_spam=False, multiple=True):
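    # Find links submitted for this URL on the current site, optionally
    # filtering out spam.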
    from pylons import c
    from r2.models import IDBuilder, Link, Subreddit, NotFound

    if not path:
        return

    try:
        links = Link._by_url(path, c.site)
    except NotFound:
        return [] if multiple else None

    return filter_links(tup(links), filter_spam=filter_spam, multiple=multiple)
Code example #43
def _get_related_link_ids(event_id):
    # imported here to avoid circular import
    from reddit_liveupdate.pages import make_event_url

    url = make_event_url(event_id)

    try:
        links = Link._by_url(url, sr=None)
    except NotFound:
        links = []

    links = itertools.islice(links, MAX_LINK_IDS_TO_CACHE)
    return [link._fullname for link in links]
Code example #44
    def add_target_fields(self, target):
        if not target:
            return
        from r2.models import Comment, Link, Message

        self.add("target_id", target._id)
        self.add("target_fullname", target._fullname)
        self.add("target_age_seconds", target._age.total_seconds())

        target_type = target.__class__.__name__.lower()
        if target_type == "link" and target.is_self:
            target_type = "self"
        self.add("target_type", target_type)

        # If the target is an Account or Subreddit (or has a "name" attr),
        # add the target_name
        if hasattr(target, "name"):
            self.add("target_name", target.name)

        # Add info about the target's author for comments, links, & messages
        if isinstance(target, (Comment, Link, Message)):
            author = target.author_slow
            if target._deleted or author._deleted:
                self.add("target_author_id", 0)
                self.add("target_author_name", "[deleted]")
            else:
                self.add("target_author_id", author._id)
                self.add("target_author_name", author.name)

        # Add info about the url being linked to for link posts
        if isinstance(target, Link):
            self.add_text("target_title", target.title)
            if not target.is_self:
                self.add("target_url", target.url)
                self.add("target_url_domain", target.link_domain())

        # Add info about the link being commented on for comments
        if isinstance(target, Comment):
            link_fullname = Link._fullname_from_id36(to36(target.link_id))
            self.add("link_id", target.link_id)
            self.add("link_fullname", link_fullname)

        # Add info about when target was originally posted for links/comments
        if isinstance(target, (Comment, Link)):
            self.add("target_created_ts", _datetime_to_millis(target._date))

        hooks.get_hook("eventcollector.add_target_fields").call(
            event=self,
            target=target,
        )
Code example #45
    def thing_lookup(self, tuples):
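        # Resolve each tuple's link fullname into its Thing in one batch fetch.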
        links = Link._by_fullname([t.link for t in tuples],
                                  data=True,
                                  return_dict=True,
                                  stale=self.stale)

        return [
            Storage({
                'thing': links[t.link],
                '_id': links[t.link]._id,
                'weight': t.weight,
                'campaign': t.campaign
            }) for t in tuples
        ]
Code example #46
    def _run_commentstree(msg):
        fname = msg.body
        comment = Comment._by_fullname(fname, data=True)

        link = Link._byID(comment.link_id, data=True)

        try:
            add_comment_tree(comment, link)
        except KeyError:
            # Hackity hack. Try to recover from a corrupted comment
            # tree
            print "Trying to fix broken comments-tree."
            link_comments(link._id, _update=True)
            add_comment_tree(comment, link)
Code example #47
    def GET_show(self, meetup, sort, num_comments):
        article = Link._byID(meetup.assoc_link)

        # figure out number to show based on the menu
        user_num = c.user.pref_num_comments or g.num_comments
        num = g.max_comments if num_comments == 'true' else user_num

        builder = CommentBuilder(article, CommentSortMenu.operator(sort), None,
                                 None)
        listing = NestedListing(builder,
                                num=num,
                                parent_name=article._fullname)
        displayPane = PaneStack()

        # insert reply box only for logged in user
        if c.user_is_loggedin:
            displayPane.append(CommentReplyBox())
            displayPane.append(CommentReplyBox(link_name=article._fullname))

        # finally add the comment listing
        displayPane.append(listing.listing())

        sort_menu = CommentSortMenu(default=sort, type='dropdown2')
        nav_menus = [
            sort_menu,
            NumCommentsMenu(article.num_comments, default=num_comments)
        ]

        content = CommentListing(
            content=displayPane,
            num_comments=article.num_comments,
            nav_menus=nav_menus,
        )

        # Update last viewed time, and return the previous last viewed time.  Actually tracked on the article
        lastViewed = None
        if c.user_is_loggedin:
            clicked = article._getLastClickTime(c.user)
            lastViewed = clicked._date if clicked else None
            article._click(c.user)

        res = ShowMeetup(meetup=meetup,
                         content=content,
                         fullname=article._fullname,
                         lastViewed=lastViewed)

        return BoringPage(pagename=meetup.title,
                          content=res,
                          body_class='meetup').render()
Code example #48
File: all_sr.py Project: zeantsoi/reddit
def write_all_hot_cache():
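    # Recompute the sitewide hot listing and cache the top link fullnames.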
    from r2.models.link import Link
    from r2.lib.db import queries

    q = Link._query(
        sort=queries.db_sort('hot'),
        limit=NUM_LINKS,
    )

    top_links = resort_links(list(q))
    link_ids = [link._fullname for link in top_links]

    g.gencache.set(CACHE_KEY, link_ids)

    return link_ids
Code example #49
File: migrate.py Project: wqx081/reddit
def port_cassaurls(after_id=None, estimate=15231317):
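    # Backfill the LinksByUrl column family from all existing non-self links.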
    from r2.models import Link, LinksByUrl
    from r2.lib.db import tdb_cassandra
    from r2.lib.db.operators import desc
    from r2.lib.db.tdb_cassandra import CL
    from r2.lib.utils import fetch_things2, in_chunks, progress

    q = Link._query(Link.c._spam == (True, False),
                    sort=desc('_date'),
                    data=True)
    if after_id:
        q._after(Link._byID(after_id, data=True))
    q = fetch_things2(q, chunk_size=500)
    q = progress(q, estimate=estimate)
    q = (l for l in q if getattr(l, 'url', 'self') != 'self'
         and not getattr(l, 'is_self', False))
    chunks = in_chunks(q, 500)

    for chunk in chunks:
        with LinksByUrl._cf.batch(write_consistency_level=CL.ONE) as b:
            for l in chunk:
                k = LinksByUrl._key_from_url(l.url)
                if k:
                    b.insert(k, {l._id36: l._id36})
Code example #50
    def _handle_adzerk(msg):
        data = json.loads(msg.body)
        g.log.debug('data: %s' % data)

        action = data.get('action')
        link = Link._by_fullname(data['link'], data=True)
        if data['campaign']:
            campaign = PromoCampaign._by_fullname(data['campaign'], data=True)
        else:
            campaign = None

        if action == 'update_adzerk':
            _update_adzerk(link, campaign)
        elif action == 'deactivate_overdelivered':
            _deactivate_overdelivered(link, campaign)
Code example #51
    def _handle_upsert_campaign(payload):
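        # Mirror the campaign into the ad server: upsert the line item and
        # creative, then associate the two.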
        link = Link._by_fullname(payload["link"], data=True)
        campaign = PromoCampaign._by_fullname(payload["campaign"], data=True)
        owner = Account._byID(campaign.owner_id)
        author = Account._byID(link.author_id)

        try:
            lineitem = lineitems_service.upsert_lineitem(owner, campaign)
        except ValueError as e:
            g.log.error("unable to upsert lineitem: %s" % e)
            return

        creative = creatives_service.upsert_creative(author, link)

        lineitems_service.associate_with_creative(
            lineitem=lineitem, creative=creative)
Code example #52
def link_duplicates(article):
    from r2.models import Link, NotFound

    # don't bother looking it up if the link doesn't have a URL anyway
    if getattr(article, 'is_self', False):
        return []

    try:
        links = tup(Link._by_url(article.url, None))
    except NotFound:
        links = []

    duplicates = [ link for link in links
                   if link._fullname != article._fullname ]

    return duplicates
Code example #53
def nameaserver_vote_lockdown(thing):
    if getattr(thing, "sr_id", None):
        sr = Subreddit._byID(thing.sr_id, data=True)
        if sr.name == g.gold_servername_sr:
            if isinstance(thing, Link):
                # no votes on links in this subreddit
                abort(403, "Forbidden")
            elif isinstance(thing, Comment):
                # only allow votes on comments in active threads by people
                # who bought gold.
                link = Link._byID(thing.link_id, data=True)

                if (hasattr(link, "revenue_date") and
                    (link.server_names
                     or c.user._id not in gold_buyers_on(link.revenue_date))):
                    abort(403, "Forbidden")
Code example #54
    def __init__(self, *args, **kwargs):
        self.prices = {
            "gold_month_price": g.gold_month_price,
            "gold_year_price": g.gold_year_price,
        }

        self.partners = GoldPartner.get_all()
        self.categories = set()
        self.giveaways = []

        # batch-lookup the Links and Subreddits for discussions
        id36s = [p.discussion_id36 for p in self.partners if p.discussion_id36]
        links = Link._byID36(id36s, data=True)
        subreddits = Subreddit._byID([link.sr_id for link in links.values()],
                                     data=True)

        for partner in self.partners:
            if partner.category:
                self.categories.add(partner.category)

            extra_classes = partner.css_classes
            if partner.is_new:
                extra_classes.append('new')
            partner.extra_classes = ' '.join(extra_classes)

            if partner.giveaway_desc:
                self.giveaways.append('{0}: {1}'.format(
                    partner.name, partner.giveaway_desc))

            if partner.discussion_id36:
                link = links[partner.discussion_id36]
                subreddit = subreddits[link.sr_id]
                partner.discussion_url = link.make_permalink(subreddit)
                partner.discussion_num_comments = link.num_comments
            else:
                partner.discussion_url = None
                partner.discussion_num_comments = None

        self.categories = sorted(self.categories)

        if c.user_is_loggedin:
            self.existing_codes = GoldPartnerDealCode.get_codes_for_user(
                c.user)
        else:
            self.existing_codes = []
        BoringPage.__init__(self, *args, **kwargs)
Code example #55
    def import_into_subreddit(self, sr, data, rewrite_map_file):
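        # Wipe all existing posts and comments, then recreate posts from the
        # imported data.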
        posts = list(Link._query())
        for post in posts:
            post._delete_from_db()

        comments = self._query_comments()
        for comment in comments:
            comment._delete_from_db()

        for post_data in data:
            try:
                print post_data['title']
                self.process_post(post_data, sr)
            except Exception, e:
                print 'Unable to create post:\n%s\n%s\n%s' % (type(e), e,
                                                              post_data)
                raise
Code example #56
    def get_links_sr_ids(self, sr_ids, sort, time):
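        # Merge per-subreddit cached results when available; otherwise fall
        # back to a live query across the subreddit ids.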
        from r2.lib.db import queries
        from r2.models import Link

        if not sr_ids:
            return []
        else:
            srs = Subreddit._byID(sr_ids, return_dict=False)

        if g.use_query_cache:
            results = [queries.get_links(sr, sort, time) for sr in srs]
            return queries.merge_results(*results)
        else:
            q = Link._query(Link.c.sr_id == sr_ids, sort=queries.db_sort(sort))
            if time != 'all':
                q._filter(queries.db_times[time])
            return q
Code example #57
def url_links_builder(url,
                      exclude=None,
                      num=None,
                      after=None,
                      reverse=None,
                      count=None,
                      public_srs_only=False):
    from r2.lib.template_helpers import add_sr
    from r2.models import IDBuilder, Link, NotFound, Subreddit
    from operator import attrgetter

    if url.startswith('/'):
        url = add_sr(url, force_hostname=True)

    try:
        links = Link._by_url(url, None)
    except NotFound:
        links = []

    links = [link for link in links if link._fullname != exclude]

    if public_srs_only and not c.user_is_admin:
        subreddits = Subreddit._byID([link.sr_id for link in links], data=True)
        links = [
            link for link in links if subreddits[link.sr_id].type != "private"
        ]

    links.sort(key=attrgetter('num_comments'), reverse=True)

    # don't show removed links in duplicates unless admin or mod
    # or unless it's your own post
    def include_link(link):
        return (not link._spam
                or (c.user_is_loggedin and
                    (link.author_id == c.user._id or c.user_is_admin
                     or link.subreddit.is_moderator(c.user))))

    builder = IDBuilder([link._fullname for link in links],
                        skip=True,
                        keep_fn=include_link,
                        num=num,
                        after=after,
                        reverse=reverse,
                        count=count)

    return builder
Code example #58
def default_queries():
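    # Build the stock hot-sorted front-page queries at two different limits.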
    from r2.models import Link, Subreddit
    from r2.lib.db.operators import desc
    from copy import deepcopy
    queries = []

    q = Link._query(Link.c.sr_id == Subreddit.user_subreddits(None),
                    sort=desc('_hot'),
                    limit=37)

    queries.append(q)
    #add a higher limit one too
    q = deepcopy(q)
    q._limit = 75
    queries.append(q)

    return queries
Code example #59
    def POST_create(self, res, title, description, location, latitude,
                    longitude, timestamp, tzoffset, ip):
        if res._chk_error(errors.NO_TITLE):
            res._chk_error(errors.TITLE_TOO_LONG)
            res._focus('title')

        res._chk_errors((errors.NO_LOCATION, errors.NO_DESCRIPTION,
                         errors.INVALID_DATE, errors.NO_DATE))

        if res.error: return

        meetup = Meetup(author_id=c.user._id,
                        title=title,
                        description=description,
                        location=location,
                        latitude=latitude,
                        longitude=longitude,
                        timestamp=timestamp,
                        tzoffset=tzoffset)

        # Expire all meetups in the render cache
        g.rendercache.invalidate_key_group(Meetup.group_cache_key())

        meetup._commit()

        l = Link._submit(meetup_article_title(meetup),
                         meetup_article_text(meetup), 'self', c.user,
                         Subreddit._by_name('meetups'), ip, [])

        l.meetup = meetup._id36
        l._commit()
        meetup.assoc_link = l._id
        meetup._commit()

        # Leave a short window of time before notification, in case the
        # meetup is edited/deleted soon after its creation
        when = datetime.now(g.tz) + timedelta(0, 3600)
        PendingJob.store(when, 'process_new_meetup', {'meetup_id': meetup._id})

        #update the queries
        if g.write_query_queue:
            queries.new_link(l)

        res._redirect(url_for(action='show', id=meetup._id36))
Code example #60
File: migrate.py Project: aguamar/reddit
def add_byurl_prefix():
    """Run one before the byurl prefix is set, and once after (killing
       it after it gets when it started the first time"""

    from datetime import datetime
    from r2.models import Link
    from r2.lib.filters import _force_utf8
    from pylons import g
    from r2.lib.utils import fetch_things2
    from r2.lib.db.operators import desc
    from r2.lib.utils import base_url

    now = datetime.now(g.tz)
    print 'started at %s' % (now,)

    l_q = Link._query(
        Link.c._date < now,
        data=True,
        sort=desc('_date'))

    # from link.py
    def by_url_key(url, prefix=''):
        s = _force_utf8(base_url(url.lower()))
        return '%s%s' % (prefix, s)

    done = 0
    for links in fetch_things2(l_q, 1000, chunks=True):
        done += len(links)
        print 'Doing: %r, %s..%s' % (done, links[-1]._date, links[0]._date)

        # only links with actual URLs
        links = filter(lambda link: (not getattr(link, 'is_self', False)
                                     and getattr(link, 'url', '')),
                       links)

        # old key -> new key
        translate = dict((by_url_key(link.url),
                          by_url_key(link.url, prefix='byurl_'))
                         for link in links)

        old = g.permacache.get_multi(translate.keys())
        new = dict((translate[old_key], value)
                   for (old_key, value)
                   in old.iteritems())
        g.permacache.set_multi(new)