def forwards(self, orm): from apps.rss_feeds.models import MStarredStory from apps.social.models import MSharedStory db = settings.MONGODB starred_count = MStarredStory.objects.count() print " ---> Saving %s starred stories..." % starred_count shared_count = MSharedStory.objects.count() print " ---> Saving %s shared stories..." % shared_count start = 0 user_count = User.objects.latest('pk').pk for user_id in xrange(start, user_count): if user_id % 1000 == 0: print " ---> %s/%s" % (user_id, user_count) stories = MStarredStory.objects(user_id=user_id, story_hash__exists=False)\ .only('id', 'story_feed_id', 'story_guid')\ .read_preference( pymongo.ReadPreference.SECONDARY ) for i, story in enumerate(stories): db.newsblur.starred_stories.update({"_id": story.id}, {"$set": { "story_hash": story.feed_guid_hash }}) stories = MSharedStory.objects(user_id=user_id, story_hash__exists=False)\ .only('id', 'user_id', 'story_feed_id', 'story_guid')\ .read_preference( pymongo.ReadPreference.SECONDARY ) for i, story in enumerate(stories): db.newsblur.shared_stories.update({"_id": story.id}, {"$set": { "story_hash": story.feed_guid_hash }})
def mark_story_as_unshared(request):
    """Delete the logged-in user's share of a story.

    Returns the re-rendered story either as an HTML fragment or as JSON,
    depending on the `format` request parameter (default json).
    """
    feed_id = int(request.POST['feed_id'])
    story_id = request.POST['story_id']
    format = request.REQUEST.get('format', 'json')

    original_story = MStory.objects(story_feed_id=feed_id,
                                    story_guid=story_id).limit(1).first()
    shared_story = MSharedStory.objects(user_id=request.user.pk,
                                        story_feed_id=feed_id,
                                        story_guid=story_id).limit(1).first()
    if not shared_story:
        return json.json_response(request, {'code': -1, 'message': 'Shared story not found.'})

    # Everyone subscribed to this blurblog must rebuild unread counts.
    for socialsub in MSocialSubscription.objects.filter(subscription_user_id=request.user.pk):
        socialsub.needs_unread_recalc = True
        socialsub.save()

    logging.user(request, "~FC~SKUn-sharing ~FM%s: ~SB~FB%s" % (shared_story.story_title[:20],
                                                                shared_story.comments[:30]))
    shared_story.delete()

    if original_story:
        original_story.count_comments()
        story = original_story
    else:
        # The story is gone from the source feed; render from the share.
        story = shared_story
    story = Feed.format_story(story)
    stories, profiles = MSharedStory.stories_with_comments_and_profiles([story],
                                                                        request.user.pk,
                                                                        check_all=True)

    if format == 'html':
        stories = MSharedStory.attach_users_to_stories(stories, profiles)
        return render_to_response('social/story_share.xhtml', {
            'story': stories[0],
        }, context_instance=RequestContext(request))
    else:
        return json.json_response(request, {
            'code': 1,
            'message': "Story unshared.",
            'story': stories[0],
            'user_profiles': profiles,
        })
def mark_story_as_unshared(request):
    """Delete the logged-in user's share of a story.

    `relative_user_id` controls from whose perspective the returned
    story's comments and profiles are assembled (defaults to the sharer).
    Responds as an HTML fragment or JSON per the `format` parameter.
    """
    feed_id = int(request.POST["feed_id"])
    story_id = request.POST["story_id"]
    relative_user_id = request.POST.get("relative_user_id") or request.user.pk
    format = request.REQUEST.get("format", "json")

    original_story = MStory.objects(story_feed_id=feed_id, story_guid=story_id).limit(1).first()
    shared_story = (
        MSharedStory.objects(user_id=request.user.pk, story_feed_id=feed_id, story_guid=story_id)
        .limit(1)
        .first()
    )
    if not shared_story:
        return json.json_response(request, {"code": -1, "message": "Shared story not found."})

    # Subscribers to this blurblog need their unread counts rebuilt.
    for socialsub in MSocialSubscription.objects.filter(subscription_user_id=request.user.pk):
        socialsub.needs_unread_recalc = True
        socialsub.save()

    logging.user(
        request,
        "~FC~SKUn-sharing ~FM%s: ~SB~FB%s" % (shared_story.story_title[:20], shared_story.comments[:30]),
    )
    shared_story.delete()

    if original_story:
        original_story.count_comments()
        story = original_story
    else:
        # Original story fell out of the feed; render from the share itself.
        story = shared_story
    story = Feed.format_story(story)
    stories, profiles = MSharedStory.stories_with_comments_and_profiles([story], relative_user_id, check_all=True)

    if format == "html":
        stories = MSharedStory.attach_users_to_stories(stories, profiles)
        return render_to_response(
            "social/social_story.xhtml", {"story": stories[0]}, context_instance=RequestContext(request)
        )
    return json.json_response(
        request, {"code": 1, "message": "Story unshared.", "story": stories[0], "user_profiles": profiles}
    )
def api_shared_story(request):
    """IFTTT trigger endpoint: recently shared blurblog stories.

    Reads `after`/`before` (epoch seconds), `limit`, and
    `triggerFields.blurblog_user` (a numeric user id or "all") from the
    JSON request body. Returns `{"data": [...]}` with one entry per
    qualifying shared story, filtered by the user's classifiers.
    """
    user = request.user
    body = request.body_json
    after = body.get('after', None)
    before = body.get('before', None)
    limit = body.get('limit', 50)
    fields = body.get('triggerFields')
    blurblog_user = fields['blurblog_user']
    entries = []

    # Fix: default to no users so an unrecognized blurblog_user value
    # (neither numeric nor "all") yields an empty result instead of a
    # NameError on the query below.
    social_user_ids = []
    if isinstance(blurblog_user, int) or blurblog_user.isdigit():
        social_user_ids = [int(blurblog_user)]
    elif blurblog_user == "all":
        socialsubs = MSocialSubscription.objects.filter(user_id=user.pk)
        social_user_ids = [ss.subscription_user_id for ss in socialsubs]

    mstories = MSharedStory.objects(
        user_id__in=social_user_ids
    ).order_by('-shared_date')[:limit]
    stories = Feed.format_stories(mstories)

    found_feed_ids = list(set([story['story_feed_id'] for story in stories]))
    share_user_ids = list(set([story['user_id'] for story in stories]))
    users = dict([(u.pk, u.username)
                  for u in User.objects.filter(pk__in=share_user_ids).only('pk', 'username')])
    feeds = dict([(f.pk, {
        "title": f.feed_title,
        "website": f.feed_link,
        "address": f.feed_address,
    }) for f in Feed.objects.filter(pk__in=found_feed_ids)])

    # Blurblog-level classifiers for the selected social users.
    classifier_feeds = list(MClassifierFeed.objects(user_id=user.pk, social_user_id__in=social_user_ids))
    classifier_authors = list(MClassifierAuthor.objects(user_id=user.pk, social_user_id__in=social_user_ids))
    classifier_titles = list(MClassifierTitle.objects(user_id=user.pk, social_user_id__in=social_user_ids))
    classifier_tags = list(MClassifierTag.objects(user_id=user.pk, social_user_id__in=social_user_ids))
    # Merge with feed specific classifiers
    classifier_feeds = classifier_feeds + list(MClassifierFeed.objects(user_id=user.pk, feed_id__in=found_feed_ids))
    classifier_authors = classifier_authors + list(MClassifierAuthor.objects(user_id=user.pk, feed_id__in=found_feed_ids))
    classifier_titles = classifier_titles + list(MClassifierTitle.objects(user_id=user.pk, feed_id__in=found_feed_ids))
    classifier_tags = classifier_tags + list(MClassifierTag.objects(user_id=user.pk, feed_id__in=found_feed_ids))

    for story in stories:
        # Hoisted: the shared-date epoch is used for both window checks
        # and the output payload; compute it once.
        shared_ts = int(story['shared_date'].strftime("%s"))
        if before and shared_ts > before:
            continue
        if after and shared_ts < after:
            continue
        score = compute_story_score(story,
                                    classifier_titles=classifier_titles,
                                    classifier_authors=classifier_authors,
                                    classifier_tags=classifier_tags,
                                    classifier_feeds=classifier_feeds)
        if score < 0:
            continue
        feed = feeds.get(story['story_feed_id'], None)
        entries.append({
            "StoryTitle": story['story_title'],
            "StoryContent": story['story_content'],
            "StoryURL": story['story_permalink'],
            "StoryAuthor": story['story_authors'],
            "PublishedAt": story['story_date'].strftime("%Y-%m-%dT%H:%M:%SZ"),
            "StoryScore": score,
            "Comments": story['comments'],
            "Username": users.get(story['user_id']),
            "SharedAt": story['shared_date'].strftime("%Y-%m-%dT%H:%M:%SZ"),
            "Site": feed and feed['title'],
            "SiteURL": feed and feed['website'],
            "SiteRSS": feed and feed['address'],
            "ifttt": {
                "id": story['story_hash'],
                "timestamp": shared_ts,
            },
        })

    if after:
        entries = sorted(entries, key=lambda s: s['ifttt']['timestamp'])

    logging.user(request, "~FMChecking shared stories from ~SB~FCIFTTT~SN~FM: ~SB~FM%s~FM~SN - ~SB%s~SN stories" % (blurblog_user, len(entries)))

    return {"data": entries}
def api_shared_story(request):
    """IFTTT trigger endpoint: recently shared blurblog stories.

    Reads `after`/`before` (epoch seconds), `limit`, and
    `triggerFields.blurblog_user` (a numeric user id or "all") from the
    JSON request body. Returns `{"data": [...]}` with one entry per
    qualifying shared story, filtered by the user's classifiers.
    """
    user = request.user
    body = request.body_json
    after = body.get('after', None)
    before = body.get('before', None)
    limit = body.get('limit', 50)
    fields = body.get('triggerFields')
    blurblog_user = fields['blurblog_user']
    entries = []

    # Fix: default to no users so an unrecognized blurblog_user value
    # (neither numeric nor "all") yields an empty result instead of a
    # NameError on the query below.
    social_user_ids = []
    if isinstance(blurblog_user, int) or blurblog_user.isdigit():
        social_user_ids = [int(blurblog_user)]
    elif blurblog_user == "all":
        socialsubs = MSocialSubscription.objects.filter(user_id=user.pk)
        social_user_ids = [ss.subscription_user_id for ss in socialsubs]

    mstories = MSharedStory.objects(
        user_id__in=social_user_ids
    ).order_by('-shared_date')[:limit]
    stories = Feed.format_stories(mstories)

    found_feed_ids = list(set([story['story_feed_id'] for story in stories]))
    share_user_ids = list(set([story['user_id'] for story in stories]))
    users = dict([(u.pk, u.username)
                  for u in User.objects.filter(pk__in=share_user_ids).only('pk', 'username')])
    feeds = dict([(f.pk, {
        "title": f.feed_title,
        "website": f.feed_link,
        "address": f.feed_address,
    }) for f in Feed.objects.filter(pk__in=found_feed_ids)])

    # Blurblog-level classifiers for the selected social users.
    classifier_feeds = list(MClassifierFeed.objects(user_id=user.pk, social_user_id__in=social_user_ids))
    classifier_authors = list(MClassifierAuthor.objects(user_id=user.pk, social_user_id__in=social_user_ids))
    classifier_titles = list(MClassifierTitle.objects(user_id=user.pk, social_user_id__in=social_user_ids))
    classifier_tags = list(MClassifierTag.objects(user_id=user.pk, social_user_id__in=social_user_ids))
    # Merge with feed specific classifiers
    classifier_feeds = classifier_feeds + list(MClassifierFeed.objects(user_id=user.pk, feed_id__in=found_feed_ids))
    classifier_authors = classifier_authors + list(MClassifierAuthor.objects(user_id=user.pk, feed_id__in=found_feed_ids))
    classifier_titles = classifier_titles + list(MClassifierTitle.objects(user_id=user.pk, feed_id__in=found_feed_ids))
    classifier_tags = classifier_tags + list(MClassifierTag.objects(user_id=user.pk, feed_id__in=found_feed_ids))

    for story in stories:
        # Hoisted: the shared-date epoch is used for both window checks
        # and the output payload; compute it once.
        shared_ts = int(story['shared_date'].strftime("%s"))
        if before and shared_ts > before:
            continue
        if after and shared_ts < after:
            continue
        score = compute_story_score(story,
                                    classifier_titles=classifier_titles,
                                    classifier_authors=classifier_authors,
                                    classifier_tags=classifier_tags,
                                    classifier_feeds=classifier_feeds)
        if score < 0:
            continue
        feed = feeds.get(story['story_feed_id'], None)
        entries.append({
            "StoryTitle": story['story_title'],
            "StoryContent": story['story_content'],
            "StoryURL": story['story_permalink'],
            "StoryAuthor": story['story_authors'],
            "PublishedAt": story['story_date'].strftime("%Y-%m-%dT%H:%M:%SZ"),
            "StoryScore": score,
            "Comments": story['comments'],
            "Username": users.get(story['user_id']),
            "SharedAt": story['shared_date'].strftime("%Y-%m-%dT%H:%M:%SZ"),
            "Site": feed and feed['title'],
            "SiteURL": feed and feed['website'],
            "SiteRSS": feed and feed['address'],
            "meta": {
                "id": story['story_hash'],
                "timestamp": shared_ts,
            },
        })

    if after:
        entries = sorted(entries, key=lambda s: s['meta']['timestamp'])

    logging.user(request, "~FMChecking shared stories from ~SB~FCIFTTT~SN~FM: ~SB~FM%s~FM~SN - ~SB%s~SN stories" % (blurblog_user, len(entries)))

    return {"data": entries}
def load_social_stories(request, user_id, username=None):
    """Return a page of a user's blurblog stories annotated with the
    requesting user's read/starred/shared state and intelligence scores.

    Request parameters: `offset`/`limit` (or `page`), `order`
    ("newest"/"oldest"), and `read_filter` ("all"/"unread").
    """
    start = time.time()
    user = get_user(request)
    social_user_id = int(user_id)
    social_user = get_object_or_404(User, pk=social_user_id)
    offset = int(request.REQUEST.get('offset', 0))
    limit = int(request.REQUEST.get('limit', 6))
    page = request.REQUEST.get('page')
    order = request.REQUEST.get('order', 'newest')
    read_filter = request.REQUEST.get('read_filter', 'all')
    if page:
        offset = limit * (int(page) - 1)
    now = localtime_for_timezone(datetime.datetime.now(), user.profile.timezone)
    UNREAD_CUTOFF = datetime.datetime.utcnow() - datetime.timedelta(days=settings.DAYS_OF_UNREAD)
    social_profile = MSocialProfile.get_user(social_user.pk)
    try:
        socialsub = MSocialSubscription.objects.get(user_id=user.pk,
                                                    subscription_user_id=social_user_id)
    except MSocialSubscription.DoesNotExist:
        socialsub = None

    # Fix: the original unconditionally ran (and formatted) the default
    # newest-first query, then threw the result away and re-queried when
    # an unread/oldest view was requested — a wasted database round-trip.
    # Decide which query to run before running any.
    story_ids = None
    if socialsub and (read_filter == 'unread' or order == 'oldest'):
        story_ids = socialsub.get_stories(order=order, read_filter=read_filter,
                                          offset=offset, limit=limit)
    if story_ids:
        story_date_order = "%sshared_date" % ('' if order == 'oldest' else '-')
        mstories = MSharedStory.objects(user_id=social_user.pk,
                                        story_db_id__in=story_ids).order_by(story_date_order)
    else:
        mstories = MSharedStory.objects(user_id=social_user.pk).order_by('-shared_date')[offset:offset+limit]
    stories = Feed.format_stories(mstories)

    if not stories:
        return dict(stories=[])

    checkpoint1 = time.time()

    stories, user_profiles = MSharedStory.stories_with_comments_and_profiles(stories, user.pk, check_all=True)

    story_feed_ids = list(set(s['story_feed_id'] for s in stories))
    usersubs = UserSubscription.objects.filter(user__pk=user.pk, feed__pk__in=story_feed_ids)
    usersubs_map = dict((sub.feed_id, sub) for sub in usersubs)
    unsub_feed_ids = list(set(story_feed_ids).difference(set(usersubs_map.keys())))
    unsub_feeds = Feed.objects.filter(pk__in=unsub_feed_ids)
    unsub_feeds = [feed.canonical(include_favicon=False) for feed in unsub_feeds]

    # Stories shared before this cutoff are treated as read.
    date_delta = UNREAD_CUTOFF
    if socialsub and date_delta < socialsub.mark_read_date:
        date_delta = socialsub.mark_read_date

    # Get intelligence classifier for user
    classifier_feeds = list(MClassifierFeed.objects(user_id=user.pk, social_user_id=social_user_id))
    classifier_authors = list(MClassifierAuthor.objects(user_id=user.pk, social_user_id=social_user_id))
    classifier_titles = list(MClassifierTitle.objects(user_id=user.pk, social_user_id=social_user_id))
    classifier_tags = list(MClassifierTag.objects(user_id=user.pk, social_user_id=social_user_id))
    # Merge with feed specific classifiers
    classifier_feeds = classifier_feeds + list(MClassifierFeed.objects(user_id=user.pk, feed_id__in=story_feed_ids))
    classifier_authors = classifier_authors + list(MClassifierAuthor.objects(user_id=user.pk, feed_id__in=story_feed_ids))
    classifier_titles = classifier_titles + list(MClassifierTitle.objects(user_id=user.pk, feed_id__in=story_feed_ids))
    classifier_tags = classifier_tags + list(MClassifierTag.objects(user_id=user.pk, feed_id__in=story_feed_ids))

    checkpoint2 = time.time()

    story_ids = [story['id'] for story in stories]
    userstories_db = MUserStory.objects(user_id=user.pk,
                                        feed_id__in=story_feed_ids,
                                        story_id__in=story_ids).only('story_id')
    userstories = set(us.story_id for us in userstories_db)
    starred_stories = MStarredStory.objects(user_id=user.pk,
                                            story_feed_id__in=story_feed_ids,
                                            story_guid__in=story_ids).only('story_guid', 'starred_date')
    shared_stories = MSharedStory.objects(user_id=user.pk,
                                          story_feed_id__in=story_feed_ids,
                                          story_guid__in=story_ids)\
                                 .only('story_guid', 'shared_date', 'comments')
    starred_stories = dict([(story.story_guid, story.starred_date) for story in starred_stories])
    shared_stories = dict([(story.story_guid, dict(shared_date=story.shared_date,
                                                   comments=story.comments))
                           for story in shared_stories])

    for story in stories:
        story['social_user_id'] = social_user_id
        story_feed_id = story['story_feed_id']
        shared_date = localtime_for_timezone(story['shared_date'], user.profile.timezone)
        story['short_parsed_date'] = format_story_link_date__short(shared_date, now)
        story['long_parsed_date'] = format_story_link_date__long(shared_date, now)

        # Read state: without a subscription everything reads as read.
        if not socialsub:
            story['read_status'] = 1
        elif story['id'] in userstories:
            story['read_status'] = 1
        elif story['shared_date'] < date_delta:
            story['read_status'] = 1
        elif not usersubs_map.get(story_feed_id):
            story['read_status'] = 0
        elif not story.get('read_status') and story['story_date'] < usersubs_map[story_feed_id].mark_read_date:
            story['read_status'] = 1
        elif not story.get('read_status') and story['shared_date'] < date_delta:
            story['read_status'] = 1
        else:
            story['read_status'] = 0

        if story['id'] in starred_stories:
            story['starred'] = True
            starred_date = localtime_for_timezone(starred_stories[story['id']], user.profile.timezone)
            story['starred_date'] = format_story_link_date__long(starred_date, now)
        if story['id'] in shared_stories:
            story['shared'] = True
            shared_date = localtime_for_timezone(shared_stories[story['id']]['shared_date'],
                                                 user.profile.timezone)
            story['shared_date'] = format_story_link_date__long(shared_date, now)
            story['shared_comments'] = strip_tags(shared_stories[story['id']]['comments'])

        story['intelligence'] = {
            'feed': apply_classifier_feeds(classifier_feeds, story['story_feed_id'],
                                           social_user_id=social_user_id),
            'author': apply_classifier_authors(classifier_authors, story),
            'tags': apply_classifier_tags(classifier_tags, story),
            'title': apply_classifier_titles(classifier_titles, story),
        }

    classifiers = sort_classifiers_by_feed(user=user, feed_ids=story_feed_ids,
                                           classifier_feeds=classifier_feeds,
                                           classifier_authors=classifier_authors,
                                           classifier_titles=classifier_titles,
                                           classifier_tags=classifier_tags)

    if socialsub:
        socialsub.feed_opens += 1
        socialsub.save()

    diff1 = checkpoint1 - start
    diff2 = checkpoint2 - start
    # Fix: `page` is a string (or None) here; the original compared it
    # directly to an int, which in Python 2 is always True for any
    # non-empty string, so "/p1" was shown even for the first page.
    logging.user(request, "~FYLoading ~FMshared stories~FY: ~SB%s%s ~SN(~SB%.4ss/%.4ss~SN)" % (
        social_profile.title[:22],
        ('~SN/p%s' % page) if page and int(page) > 1 else '',
        diff1, diff2))

    return {
        "stories": stories,
        "user_profiles": user_profiles,
        "feeds": unsub_feeds,
        "classifiers": classifiers,
    }
def load_social_page(request, user_id, username=None, **kwargs):
    """Render a user's public blurblog page, or just its story list as an
    HTML fragment when `format=html`."""
    start = time.time()
    user = request.user
    social_user_id = int(user_id)
    social_user = get_object_or_404(User, pk=social_user_id)
    offset = int(request.REQUEST.get('offset', 0))
    limit = int(request.REQUEST.get('limit', 6))
    page = request.REQUEST.get('page')
    format = request.REQUEST.get('format', None)
    has_next_page = False
    feed_id = kwargs.get('feed_id') or request.REQUEST.get('feed_id')
    if page:
        offset = limit * (int(page) - 1)

    user_social_profile = None
    if user.is_authenticated():
        user_social_profile = MSocialProfile.get_user(user.pk)
    social_profile = MSocialProfile.get_user(social_user_id)

    query_params = dict(user_id=social_user.pk)
    if feed_id:
        query_params['story_feed_id'] = feed_id
    # Over-fetch by one story so we know whether a next page exists.
    mstories = MSharedStory.objects(**query_params).order_by('-shared_date')[offset:offset+limit+1]
    stories = Feed.format_stories(mstories)
    if len(stories) > limit:
        has_next_page = True
        stories = stories[:-1]

    checkpoint1 = time.time()

    if not stories:
        params = {
            "user": user,
            "stories": [],
            "feeds": {},
            "social_user": social_user,
            "social_profile": social_profile,
            'user_social_profile': json.encode(user_social_profile and user_social_profile.page()),
        }
        return render_to_response('social/social_page.xhtml', params,
                                  context_instance=RequestContext(request))

    story_feed_ids = list(set(s['story_feed_id'] for s in stories))
    feeds = dict((feed.pk, feed.canonical(include_favicon=False))
                 for feed in Feed.objects.filter(pk__in=story_feed_ids))
    for story in stories:
        if story['story_feed_id'] in feeds:
            # Feed could have been deleted.
            story['feed'] = feeds[story['story_feed_id']]
        shared_date = localtime_for_timezone(story['shared_date'], social_user.profile.timezone)
        story['shared_date'] = shared_date

    stories, profiles = MSharedStory.stories_with_comments_and_profiles(stories, social_user.pk,
                                                                        check_all=True)

    checkpoint2 = time.time()

    # Mark which stories the viewer has shared, with their own comments.
    if user.is_authenticated():
        for story in stories:
            if user.pk in story['shared_by_friends'] or user.pk in story['shared_by_public']:
                story['shared_by_user'] = True
                shared_story = MSharedStory.objects.get(user_id=user.pk,
                                                        story_feed_id=story['story_feed_id'],
                                                        story_guid=story['id'])
                story['user_comments'] = shared_story.comments

    stories = MSharedStory.attach_users_to_stories(stories, profiles)

    params = {
        'social_user': social_user,
        'stories': stories,
        'user_social_profile': json.encode(user_social_profile and user_social_profile.page()),
        'social_profile': social_profile,
        'feeds': feeds,
        'user_profile': hasattr(user, 'profile') and user.profile,
        'has_next_page': has_next_page,
        'holzer_truism': random.choice(jennyholzer.TRUISMS),  # if not has_next_page else None
    }

    diff1 = checkpoint1 - start
    diff2 = checkpoint2 - start
    timediff = time.time() - start
    # NOTE(review): `page` is a string here, so `page > 1` is a Python 2
    # string/int comparison — confirm this is the intended display logic.
    logging.user(request, "~FYLoading ~FMsocial page~FY: ~SB%s%s ~SN(%.4s seconds, ~SB%.4s/%.4s~SN)" % (
        social_profile.title[:22], ('~SN/p%s' % page) if page > 1 else '', timediff, diff1, diff2))

    if format == 'html':
        template = 'social/social_stories.xhtml'
    else:
        template = 'social/social_page.xhtml'
    return render_to_response(template, params, context_instance=RequestContext(request))
def load_social_page(request, user_id, username=None, **kwargs):
    """Render a user's public blurblog page (including the viewer's own
    social profile and connected services when authenticated), or just
    the story list fragment when `format=html`."""
    start = time.time()
    user = request.user
    social_user_id = int(user_id)
    social_user = get_object_or_404(User, pk=social_user_id)
    offset = int(request.REQUEST.get("offset", 0))
    limit = int(request.REQUEST.get("limit", 6))
    page = request.REQUEST.get("page")
    format = request.REQUEST.get("format", None)
    has_next_page = False
    feed_id = kwargs.get("feed_id") or request.REQUEST.get("feed_id")
    if page:
        offset = limit * (int(page) - 1)

    user_social_profile = None
    user_social_services = None
    if user.is_authenticated():
        user_social_profile = MSocialProfile.get_user(user.pk)
        user_social_services = MSocialServices.get_user(user.pk)
    social_profile = MSocialProfile.get_user(social_user_id)

    query_params = dict(user_id=social_user.pk)
    if feed_id:
        query_params["story_feed_id"] = feed_id
    # Over-fetch by one story so pagination knows whether a next page exists.
    mstories = MSharedStory.objects(**query_params).order_by("-shared_date")[offset : offset + limit + 1]
    stories = Feed.format_stories(mstories)
    if len(stories) > limit:
        has_next_page = True
        stories = stories[:-1]

    checkpoint1 = time.time()

    if not stories:
        params = {
            "user": user,
            "stories": [],
            "feeds": {},
            "social_user": social_user,
            "social_profile": social_profile,
            "user_social_services": user_social_services,
            "user_social_profile": json.encode(user_social_profile and user_social_profile.page()),
        }
        return render_to_response("social/social_page.xhtml", params,
                                  context_instance=RequestContext(request))

    story_feed_ids = list(set(s["story_feed_id"] for s in stories))
    feeds = dict((feed.pk, feed.canonical(include_favicon=False))
                 for feed in Feed.objects.filter(pk__in=story_feed_ids))
    for story in stories:
        if story["story_feed_id"] in feeds:
            # Feed could have been deleted.
            story["feed"] = feeds[story["story_feed_id"]]
        shared_date = localtime_for_timezone(story["shared_date"], social_user.profile.timezone)
        story["shared_date"] = shared_date

    stories, profiles = MSharedStory.stories_with_comments_and_profiles(stories, social_user.pk,
                                                                        check_all=True)

    checkpoint2 = time.time()

    # Mark stories the viewer has also shared, with their own comments.
    if user.is_authenticated():
        for story in stories:
            if user.pk in story["share_user_ids"]:
                story["shared_by_user"] = True
                shared_story = MSharedStory.objects.get(
                    user_id=user.pk, story_feed_id=story["story_feed_id"], story_guid=story["id"]
                )
                story["user_comments"] = shared_story.comments

    stories = MSharedStory.attach_users_to_stories(stories, profiles)

    params = {
        "social_user": social_user,
        "stories": stories,
        "user_social_profile": user_social_profile,
        "user_social_profile_page": json.encode(user_social_profile and user_social_profile.page()),
        "user_social_services": user_social_services,
        "user_social_services_page": json.encode(user_social_services and user_social_services.to_json()),
        "social_profile": social_profile,
        "feeds": feeds,
        "user_profile": hasattr(user, "profile") and user.profile,
        "has_next_page": has_next_page,
        "holzer_truism": random.choice(jennyholzer.TRUISMS),  # if not has_next_page else None
    }

    diff1 = checkpoint1 - start
    diff2 = checkpoint2 - start
    timediff = time.time() - start
    # NOTE(review): `page` is a string here, so `page > 1` is a Python 2
    # string/int comparison — confirm this is the intended display logic.
    logging.user(
        request,
        "~FYLoading ~FMsocial page~FY: ~SB%s%s ~SN(%.4s seconds, ~SB%.4s/%.4s~SN)"
        % (social_profile.title[:22], ("~SN/p%s" % page) if page > 1 else "", timediff, diff1, diff2),
    )

    template = "social/social_stories.xhtml" if format == "html" else "social/social_page.xhtml"
    return render_to_response(template, params, context_instance=RequestContext(request))
def load_river_blurblog(request):
    """Assemble the "river of blurblogs" view: a page of (by default
    unread) stories across a set of followed blurblogs, annotated with
    read/starred/shared state and intelligence scores."""
    limit = 10
    start = time.time()
    user = get_user(request)
    social_user_ids = [int(uid) for uid in request.REQUEST.getlist("social_user_ids") if uid]
    original_user_ids = list(social_user_ids)
    page = int(request.REQUEST.get("page", 1))
    order = request.REQUEST.get("order", "newest")
    read_filter = request.REQUEST.get("read_filter", "unread")
    relative_user_id = request.REQUEST.get("relative_user_id", None)
    now = localtime_for_timezone(datetime.datetime.now(), user.profile.timezone)
    UNREAD_CUTOFF = datetime.datetime.utcnow() - datetime.timedelta(days=settings.DAYS_OF_UNREAD)

    if not relative_user_id:
        relative_user_id = get_user(request).pk
    if not social_user_ids:
        # No explicit blurblogs requested: use everything the user follows.
        socialsubs = MSocialSubscription.objects.filter(user_id=user.pk)
        social_user_ids = [s.subscription_user_id for s in socialsubs]

    offset = (page - 1) * limit
    limit = page * limit - 1
    story_ids, story_dates = MSocialSubscription.feed_stories(
        user.pk, social_user_ids, offset=offset, limit=limit, order=order, read_filter=read_filter
    )
    mstories = MStory.objects(id__in=story_ids)

    # The id lookup discards ordering; re-impose shared-date order.
    date_by_id = dict(zip(story_ids, story_dates))
    sorted_mstories = sorted(
        mstories, cmp=lambda a, b: int(date_by_id[str(b.id)]) - int(date_by_id[str(a.id)])
    )
    stories = Feed.format_stories(sorted_mstories)
    for index, story in enumerate(stories):
        story["story_date"] = datetime.datetime.fromtimestamp(story_dates[index])
    stories, user_profiles = MSharedStory.stories_with_comments_and_profiles(
        stories, relative_user_id, check_all=True
    )

    story_feed_ids = list(set(s["story_feed_id"] for s in stories))
    usersubs = UserSubscription.objects.filter(user__pk=user.pk, feed__pk__in=story_feed_ids)
    usersubs_map = dict((sub.feed_id, sub) for sub in usersubs)
    unsub_feed_ids = list(set(story_feed_ids).difference(set(usersubs_map.keys())))
    unsub_feeds = [feed.canonical(include_favicon=False)
                   for feed in Feed.objects.filter(pk__in=unsub_feed_ids)]

    # Find starred stories
    if story_feed_ids:
        story_ids = [story["id"] for story in stories]
        starred_stories = dict(
            (s.story_guid, s.starred_date)
            for s in MStarredStory.objects(user_id=user.pk, story_guid__in=story_ids).only(
                "story_guid", "starred_date"
            )
        )
        shared_stories = dict(
            (s.story_guid, dict(shared_date=s.shared_date, comments=s.comments))
            for s in MSharedStory.objects(user_id=user.pk, story_guid__in=story_ids).only(
                "story_guid", "shared_date", "comments"
            )
        )
        userstories_db = MUserStory.objects(
            user_id=user.pk, feed_id__in=story_feed_ids, story_id__in=story_ids
        ).only("story_id")
        userstories = set(us.story_id for us in userstories_db)
    else:
        starred_stories = {}
        shared_stories = {}
        userstories = []

    # Intelligence classifiers for all feeds involved
    if story_feed_ids:
        classifier_feeds = list(MClassifierFeed.objects(user_id=user.pk, feed_id__in=story_feed_ids))
        classifier_authors = list(MClassifierAuthor.objects(user_id=user.pk, feed_id__in=story_feed_ids))
        classifier_titles = list(MClassifierTitle.objects(user_id=user.pk, feed_id__in=story_feed_ids))
        classifier_tags = list(MClassifierTag.objects(user_id=user.pk, feed_id__in=story_feed_ids))
    else:
        classifier_feeds = []
        classifier_authors = []
        classifier_titles = []
        classifier_tags = []
    classifiers = sort_classifiers_by_feed(
        user=user,
        feed_ids=story_feed_ids,
        classifier_feeds=classifier_feeds,
        classifier_authors=classifier_authors,
        classifier_titles=classifier_titles,
        classifier_tags=classifier_tags,
    )

    # Just need to format stories
    for story in stories:
        if story["id"] in userstories:
            story["read_status"] = 1
        elif story["story_date"] < UNREAD_CUTOFF:
            story["read_status"] = 1
        else:
            story["read_status"] = 0
        story_date = localtime_for_timezone(story["story_date"], user.profile.timezone)
        story["short_parsed_date"] = format_story_link_date__short(story_date, now)
        story["long_parsed_date"] = format_story_link_date__long(story_date, now)
        if story["id"] in starred_stories:
            story["starred"] = True
            starred_date = localtime_for_timezone(starred_stories[story["id"]], user.profile.timezone)
            story["starred_date"] = format_story_link_date__long(starred_date, now)
        story["intelligence"] = {
            "feed": apply_classifier_feeds(classifier_feeds, story["story_feed_id"]),
            "author": apply_classifier_authors(classifier_authors, story),
            "tags": apply_classifier_tags(classifier_tags, story),
            "title": apply_classifier_titles(classifier_titles, story),
        }
        if story["id"] in shared_stories:
            story["shared"] = True
            shared_date = localtime_for_timezone(
                shared_stories[story["id"]]["shared_date"], user.profile.timezone
            )
            story["shared_date"] = format_story_link_date__long(shared_date, now)
            story["shared_comments"] = strip_tags(shared_stories[story["id"]]["comments"])

    diff = time.time() - start
    timediff = round(float(diff), 2)
    logging.user(
        request,
        "~FYLoading ~FCriver blurblogs stories~FY: ~SBp%s~SN (%s/%s "
        "stories, ~SN%s/%s/%s feeds)"
        % (page, len(stories), len(mstories), len(story_feed_ids), len(social_user_ids), len(original_user_ids)),
    )

    return {
        "stories": stories,
        "user_profiles": user_profiles,
        "feeds": unsub_feeds,
        "classifiers": classifiers,
        "elapsed_time": timediff,
    }