Example #1
    def get_dates_in_discussion_life_bounds(self,
                                            start_date=None,
                                            end_date=None,
                                            force_bounds=False):
        """
        @param start_date: string
        @param end_date: string
        """
        from datetime import datetime
        from assembl.lib.parsedatetime import parse_datetime

        start = start_date
        end = end_date
        discussion = self

        if start:
            start = parse_datetime(start)
            if force_bounds and start:
                discussion_lower_bound = discussion.creation_date
                if start < discussion_lower_bound:
                    start = discussion_lower_bound
        else:
            start = discussion.creation_date
        if end:
            end = parse_datetime(end)
            if force_bounds and end:
                if end < start:
                    end = start
                discussion_upper_bound = datetime.now()
                if end > discussion_upper_bound:
                    end = discussion_upper_bound
        else:
            end = datetime.now()
        return (start, end)
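
A minimal usage sketch (the discussion object and ISO strings are hypothetical; with force_bounds the requested range is clamped to the discussion's lifetime):

# Hypothetical usage; `discussion` is an assembl Discussion instance.
start, end = discussion.get_dates_in_discussion_life_bounds(
    start_date="2015-01-01T00:00:00Z",
    end_date="2030-01-01T00:00:00Z",
    force_bounds=True)
# start is clamped up to discussion.creation_date,
# end is clamped down to datetime.now().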
Example #2
def get_visit_count(request):
    import isodate
    from datetime import datetime
    from assembl.lib.parsedatetime import parse_datetime
    start = request.GET.get("start", None)
    end = request.GET.get("end", None)
    interval = request.GET.get("interval", None)
    discussion = request.context._instance
    try:
        if start:
            start = parse_datetime(start)
        if end:
            end = parse_datetime(end)
        if interval:
            interval = isodate.parse_duration(interval)
    except isodate.ISO8601Error as e:
        raise HTTPBadRequest(e)
    if interval and not start:
        raise HTTPBadRequest("You cannot define an interval and no start")
    if interval and not end:
        end = datetime.now()
    results = []
    if interval:
        while start < end:
            this_end = min(start + interval, end)
            results.append(
                dict(start=start.isoformat(),
                     end=this_end.isoformat(),
                     readers=discussion.count_post_viewers(start, this_end),
                     first_visitors=discussion.count_new_visitors(
                         start, this_end)))
            start = this_end
    else:
        r = dict(readers=discussion.count_post_viewers(start, end),
                 first_visitors=discussion.count_new_visitors(start, end))
        if not start:
            from assembl.models import Post
            from sqlalchemy import func
            (start, ) = discussion.db.query(func.min(
                Post.creation_date)).filter_by(
                    discussion_id=discussion.id).first()
        r["start"] = start.isoformat()
        if not end:
            end = datetime.now()
        r["end"] = end.isoformat()
        results.append(r)
    if not (request.GET.get('format', None) == 'csv'
            or request.accept == 'text/csv'):
        # json default
        return Response(json.dumps(results), content_type='application/json')
    # otherwise assume csv
    from csv import DictWriter
    output = StringIO()
    csv = DictWriter(output,
                     fieldnames=['start', 'end', 'first_visitors', 'readers'])
    csv.writeheader()
    for r in results:
        csv.writerow(r)
    output.seek(0)
    return Response(body_file=output, content_type='text/csv')
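
The interval query parameter is an ISO 8601 duration handled by isodate.parse_duration; a quick sketch of typical values:

import isodate

# ISO 8601 durations accepted for the `interval` query parameter:
isodate.parse_duration("PT1H")  # one hour  -> datetime.timedelta(0, 3600)
isodate.parse_duration("P1D")   # one day   -> datetime.timedelta(1)
isodate.parse_duration("P7D")   # one week  -> datetime.timedelta(7)
isodate.parse_duration("P1M")   # one month -> isodate.Duration (calendar-aware)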
Example #3
def get_visit_count(request):
    import isodate
    from datetime import datetime
    from assembl.lib.parsedatetime import parse_datetime
    start = request.GET.get("start", None)
    end = request.GET.get("end", None)
    interval = request.GET.get("interval", None)
    discussion = request.context._instance
    try:
        if start:
            start = parse_datetime(start)
        if end:
            end = parse_datetime(end)
        if interval:
            interval = isodate.parse_duration(interval)
    except isodate.ISO8601Error as e:
        raise HTTPBadRequest(e)
    if interval and not start:
        raise HTTPBadRequest("You cannot define an interval and no start")
    if interval and not end:
        end = datetime.now()
    results = []
    if interval:
        while start < end:
            this_end = min(start+interval, end)
            results.append(dict(
                start=start.isoformat(), end=this_end.isoformat(),
                readers=discussion.count_post_viewers(
                    start, this_end),
                first_visitors=discussion.count_new_visitors(
                    start, this_end)))
            start = this_end
    else:
        r = dict(
            readers=discussion.count_post_viewers(start, end),
            first_visitors=discussion.count_new_visitors(start, end))
        if not start:
            from assembl.models import AgentStatusInDiscussion
            from sqlalchemy import func
            (start,) = discussion.db.query(
                func.min(AgentStatusInDiscussion.first_visit)).filter_by(
                discussion_id=discussion.id).first()
        r["start"] = start.isoformat()
        if not end:
            end = datetime.now()
        r["end"] = end.isoformat()
        results.append(r)
    if not (request.GET.get('format', None) == 'csv'
            or request.accept == 'text/csv'):
        # json default
        return Response(json.dumps(results), content_type='application/json')
    # otherwise assume csv
    fieldnames = ['start', 'end', 'first_visitors', 'readers']
    return csv_response(fieldnames, results)
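
Example #3 delegates CSV serialization to a csv_response helper. A plausible reconstruction, inferred from the inline CSV code of Example #2 (the actual assembl helper may differ):

# Hypothetical reconstruction of csv_response, modeled on Example #2.
from cStringIO import StringIO  # Python 2, as in the surrounding code
from csv import DictWriter
from pyramid.response import Response

def csv_response(fieldnames, results):
    output = StringIO()
    writer = DictWriter(output, fieldnames=fieldnames)
    writer.writeheader()
    for row in results:
        writer.writerow(row)
    output.seek(0)
    return Response(body_file=output, content_type='text/csv')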
Example #4
    def has_notification(self):
        settings = self.settings_json
        notifications = settings.get('notifications', [])
        now = datetime.utcnow()

        for notification in notifications:
            try:
                start = parse_datetime(notification['start'])
                end = notification.get('end', None)
                end = parse_datetime(end) if end else datetime.max
                if now < start or now > end:
                    continue
            except (ValueError, TypeError, KeyError) as e:
                continue
            notification_data = self.notification_data(notification)
            if notification_data:
                yield notification_data
Example #5
    def has_notification(self):
        settings = self.settings_json
        notifications = settings.get('notifications', [])
        now = datetime.utcnow()

        for notification in notifications:
            try:
                start = parse_datetime(notification['start'])
                end = notification.get('end', None)
                end = parse_datetime(end) if end else datetime.max
                if now < start or now > end:
                    continue
            except (ValueError, TypeError, KeyError):
                continue
            notification_data = self.notification_data(notification)
            if notification_data:
                yield notification_data
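
Both variants iterate over notification windows stored in the widget's settings_json; a sample of the expected shape (values hypothetical):

# Hypothetical settings_json consumed by has_notification():
{
    "notifications": [
        {"start": "2017-03-01T00:00:00Z",
         "end": "2017-03-15T00:00:00Z"},    # yielded only inside this window
        {"start": "2017-04-01T00:00:00Z"}   # no "end": open-ended (datetime.max)
    ]
}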
Example #6
    def notification_data(self, data):
        end = data.get('end', None)
        time_to_end = (parse_datetime(end) -
                       datetime.utcnow()).total_seconds() if end else None
        return dict(data,
                    widget_url=self.uri(),
                    time_to_end=time_to_end,
                    num_participants=self.num_participants(),
                    num_ideas=len(self.generated_idea_links))
Example #7
    def notification_data(self, data):
        end = data.get('end', None)
        time_to_end = (parse_datetime(end) - datetime.utcnow()
                       ).total_seconds() if end else None
        return dict(
            data,
            widget_url=self.uri(),
            time_to_end=time_to_end,
            num_participants=self.num_participants(),
            num_ideas=len(self.generated_idea_links))
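
time_to_end is the number of seconds from now until the notification's end, or None when no end is set; a quick check of the arithmetic:

from datetime import datetime, timedelta

# An `end` one day away yields roughly 86400 seconds:
end = datetime.utcnow() + timedelta(days=1)
print (end - datetime.utcnow()).total_seconds()  # ~86400.0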
Example #8
def get_ideas(request):
    user_id = authenticated_userid(request) or Everyone
    discussion = request.context
    view_def = request.GET.get('view')
    ids = request.GET.getall('ids')
    modified_after = request.GET.get('modified_after')
    if modified_after:
        modified_after = parse_datetime(modified_after, True)
    return _get_ideas_real(request,
                           view_def=view_def,
                           ids=ids,
                           user_id=user_id,
                           modified_after=modified_after)
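
Unlike the other call sites in this section, this one passes a second positional argument to parse_datetime, presumably a flag that makes invalid input raise instead of returning None (an assumption; the source does not show the helper's signature):

# Assumed contract (hypothetical; the helper lives in assembl.lib.parsedatetime):
#   parse_datetime("2017-01-01T00:00:00Z")  -> datetime, or None if unparseable
#   parse_datetime("not-a-date", True)      -> raises instead of returning None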
Example #9
def get_visit_count(request):
    import isodate
    from datetime import datetime
    from assembl.lib.parsedatetime import parse_datetime

    start = request.GET.get("start", None)
    end = request.GET.get("end", None)
    interval = request.GET.get("interval", None)
    discussion = request.context._instance
    try:
        if start:
            start = parse_datetime(start)
        if end:
            end = parse_datetime(end)
        if interval:
            interval = isodate.parse_duration(interval)
    except isodate.ISO8601Error as e:
        raise HTTPBadRequest(e)
    if interval and not start:
        raise HTTPBadRequest("You cannot define an interval and no start")
    if interval and not end:
        end = datetime.now()
    results = []
    if interval:
        while start < end:
            this_end = min(start + interval, end)
            results.append(
                dict(
                    start=start.isoformat(),
                    end=this_end.isoformat(),
                    readers=discussion.count_post_viewers(start, this_end),
                    first_visitors=discussion.count_new_visitors(start, this_end),
                )
            )
            start = this_end
    else:
        r = dict(
            readers=discussion.count_post_viewers(start, end), first_visitors=discussion.count_new_visitors(start, end)
        )
        if not start:
            from assembl.models import Post
            from sqlalchemy import func

            (start,) = discussion.db.query(func.min(Post.creation_date)).filter_by(discussion_id=discussion.id).first()
        r["start"] = start.isoformat()
        if not end:
            end = datetime.now()
        r["end"] = end.isoformat()
        results.append(r)
    if not (request.GET.get("format", None) == "csv" or request.accept == "text/csv"):
        # json default
        return Response(json.dumps(results), content_type="application/json")
    # otherwise assume csv
    from csv import DictWriter

    output = StringIO()
    csv = DictWriter(output, fieldnames=["start", "end", "first_visitors", "readers"])
    csv.writeheader()
    for r in results:
        csv.writerow(r)
    output.seek(0)
    return Response(body_file=output, content_type="text/csv")
Example #10
def get_contribution_count(request):
    import isodate
    from datetime import datetime
    from assembl.lib.parsedatetime import parse_datetime
    start = request.GET.get("start", None)
    end = request.GET.get("end", None)
    interval = request.GET.get("interval", None)
    discussion = request.context._instance
    try:
        if start:
            start = parse_datetime(start)
        if end:
            end = parse_datetime(end)
        if interval:
            interval = isodate.parse_duration(interval)
    except isodate.ISO8601Error as e:
        raise HTTPBadRequest(e)
    if interval and not start:
        raise HTTPBadRequest("You cannot define an interval and no start")
    if interval and not end:
        end = datetime.now()
    results = []
    if interval:
        while start < end:
            this_end = min(start + interval, end)
            results.append(
                dict(start=start.isoformat(),
                     end=this_end.isoformat(),
                     count=discussion.count_contributions_per_agent(
                         start, this_end)))
            start = this_end
    else:
        r = dict(count=discussion.count_contributions_per_agent(start, end))
        if not start:
            from assembl.models import Post
            from sqlalchemy import func
            (start, ) = discussion.db.query(func.min(
                Post.creation_date)).filter_by(
                    discussion_id=discussion.id).first()
        r["start"] = start.isoformat()
        if not end:
            end = datetime.now()
        r["end"] = end.isoformat()
        results.append(r)
    if not (request.GET.get('format', None) == 'csv'
            or request.accept == 'text/csv'):
        # json default
        for v in results:
            v['count'] = {
                agent.display_name(): count
                for (agent, count) in v['count']
            }
        return Response(json.dumps(results), content_type='application/json')
    # otherwise assume csv
    from csv import writer
    total_count = defaultdict(int)
    agents = {}
    for v in results:
        as_dict = {}
        for (agent, count) in v['count']:
            total_count[agent.id] += count
            as_dict[agent.id] = count
            agents[agent.id] = agent
        v['count'] = as_dict
    count_list = total_count.items()
    count_list.sort(key=lambda (a, c): c, reverse=True)
    output = StringIO()
    csv = writer(output)
    csv.writerow(['Start'] + [x['start'] for x in results] + ['Total'])
    csv.writerow(['End'] + [x['end'] for x in results] + [''])
    for agent_id, agent_total in count_list:
        agent = agents[agent_id]
        agent_name = (agent.display_name() or agent.real_name()
                      or agent.get_preferred_email())
        csv.writerow([agent_name.encode('utf-8')] +
                     [x['count'].get(agent_id, '')
                      for x in results] + [agent_total])
    output.seek(0)
    return Response(body_file=output, content_type='text/csv')
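
Note that count_list.sort(key=lambda (a, c): c, reverse=True) above relies on Python 2 tuple-parameter unpacking, which PEP 3113 removed; a Python 3 equivalent would be:

# Python 3 port of the sort (tuple parameters are gone in Python 3):
count_list = sorted(total_count.items(), key=lambda item: item[1], reverse=True)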
Example #11
File: post.py Project: Lornz-/assembl
def get_posts(request):
    """
    Query interface on posts
    Filters have two forms:
    only_*, is for filters that cannot be reversed (ex: only_synthesis)
    is_*, is for filters that can be reversed (ex:is_unread=true returns only unread
    order can be chronological, reverse_chronological
    message, is_unread=false returns only read messages)
    """
    localizer = request.localizer
    discussion_id = int(request.matchdict['discussion_id'])
    discussion = Discussion.get(int(discussion_id))
    if not discussion:
        raise HTTPNotFound(localizer.translate(
            _("No discussion found with id=%s")) % discussion_id)

    discussion.import_from_sources()

    user_id = authenticated_userid(request) or Everyone
    permissions = get_permissions(user_id, discussion_id)

    DEFAULT_PAGE_SIZE = 25
    page_size = DEFAULT_PAGE_SIZE

    filter_names = [
        filter_name for filter_name in request.GET.getone('filters').split(',')
        if filter_name
    ] if request.GET.get('filters') else []

    try:
        page = int(request.GET.getone('page'))
    except (ValueError, KeyError):
        page = 1

    text_search = request.GET.get('text_search', None)

    order = request.GET.get('order')
    if order is None:
        order = 'chronological'
    assert order in ('chronological', 'reverse_chronological', 'score')
    if order == 'score':
        assert text_search is not None

    if page < 1:
        page = 1

    root_post_id = request.GET.getall('root_post_id')
    if root_post_id:
        root_post_id = get_database_id("Post", root_post_id[0])
    family_post_id = request.GET.getall('family_post_id')
    if family_post_id:
        family_post_id = get_database_id("Post", family_post_id[0])

    root_idea_id = request.GET.getall('root_idea_id')
    if root_idea_id:
        root_idea_id = get_database_id("Idea", root_idea_id[0])

    ids = request.GET.getall('ids[]')
    if ids:
        ids = [get_database_id("Post", id) for id in ids]

    view_def = request.GET.get('view') or 'default'

    only_synthesis = request.GET.get('only_synthesis')

    post_author_id = request.GET.get('post_author')
    if post_author_id:
        post_author_id = get_database_id("AgentProfile", post_author_id)
        assert AgentProfile.get(post_author_id), "Unable to find agent profile with id " + str(post_author_id)

    post_replies_to = request.GET.get('post_replies_to')
    if post_replies_to:
        post_replies_to = get_database_id("AgentProfile", post_replies_to)
        assert AgentProfile.get(post_replies_to), "Unable to find agent profile with id " + str(post_replies_to)

    posted_after_date = request.GET.get('posted_after_date')
    posted_before_date = request.GET.get('posted_before_date')

    PostClass = SynthesisPost if only_synthesis == "true" else Post
    ideaContentLinkQuery = discussion.db.query(
        PostClass.id, PostClass.idea_content_links_above_post)
    if order == 'score':
        posts = discussion.db.query(PostClass, Content.body_text_index.score_name)
    else:
        posts = discussion.db.query(PostClass)

    posts = posts.filter(
        PostClass.discussion_id == discussion_id,
    )
    ideaContentLinkQuery = ideaContentLinkQuery.filter(
        PostClass.discussion_id == discussion_id)
    ##no_of_posts_to_discussion = posts.count()

    post_data = []

    only_orphan = request.GET.get('only_orphan')
    if only_orphan == "true":
        if root_idea_id:
            raise HTTPBadRequest(localizer.translate(
                _("Getting orphan posts of a specific idea isn't supported.")))
        orphans = text(Idea._get_orphan_posts_statement(),
                        bindparams=[bindparam('discussion_id', discussion_id)]
                        ).columns(column('post_id')).alias('orphans')
        posts = posts.join(orphans, PostClass.id==orphans.c.post_id)
        ideaContentLinkQuery = ideaContentLinkQuery.join(
            orphans, PostClass.id==orphans.c.post_id)
    elif only_orphan == "false":
        raise HTTPBadRequest(localizer.translate(
            _("Getting non-orphan posts isn't supported.")))

    # "true" means hidden only, "false" (default) means visible only. "any" means both.
    hidden = request.GET.get('hidden_messages', "false")
    if hidden != 'any':
        posts = posts.filter(PostClass.hidden==asbool(hidden))
        ideaContentLinkQuery = ideaContentLinkQuery.filter(
            PostClass.hidden==asbool(hidden))

    if root_idea_id:
        related = text(Idea._get_related_posts_statement(),
                    bindparams=[bindparam('root_idea_id', root_idea_id),
                    bindparam('discussion_id', discussion_id)]
                    ).columns(column('post_id')).alias('related')
        #Virtuoso bug: This should work...
        #posts = posts.join(related, PostClass.id==related.c.post_id)
        posts = posts.join(related, PostClass.id == related.c.post_id)
        ideaContentLinkQuery = ideaContentLinkQuery.join(
            related, PostClass.id == related.c.post_id)
    if root_post_id:
        root_post = Post.get(root_post_id)

        posts = posts.filter(
            (Post.ancestry.like(
                root_post.ancestry + cast(root_post.id, String) + ',%'))
            | (PostClass.id == root_post.id))
    elif family_post_id:
        root_post = Post.get(family_post_id)
        ancestor_ids = root_post.ancestor_ids()
        posts = posts.filter(
            (Post.ancestry.like(
                root_post.ancestry + cast(root_post.id, String) + ',%'))
            | (PostClass.id == root_post.id)
            | (PostClass.id.in_(ancestor_ids)))
        ideaContentLinkQuery = ideaContentLinkQuery.filter(
            (Post.ancestry.like(
                root_post.ancestry + cast(root_post.id, String) + ',%'))
            | (PostClass.id == root_post.id)
            | (PostClass.id.in_(ancestor_ids)))
    else:
        root_post = None

    if ids:
        posts = posts.filter(Post.id.in_(ids))
        ideaContentLinkQuery = ideaContentLinkQuery.filter(Post.id.in_(ids))

    if posted_after_date:
        posted_after_date = parse_datetime(posted_after_date)
        if posted_after_date:
            posts = posts.filter(PostClass.creation_date >= posted_after_date)
            ideaContentLinkQuery = ideaContentLinkQuery.filter(
                PostClass.creation_date >= posted_after_date)
        #Maybe we should do something if the date is invalid.  benoitg

    if posted_before_date:
        posted_before_date = parse_datetime(posted_before_date)
        if posted_before_date:
            posts = posts.filter(PostClass.creation_date <= posted_before_date)
            ideaContentLinkQuery = ideaContentLinkQuery.filter(
                PostClass.creation_date <= posted_before_date)
        #Maybe we should do something if the date is invalid.  benoitg

    if post_author_id:
        posts = posts.filter(PostClass.creator_id == post_author_id)
        ideaContentLinkQuery = ideaContentLinkQuery.filter(
            PostClass.creator_id == post_author_id)

    if post_replies_to:
        parent_alias = aliased(PostClass)
        posts = posts.join(parent_alias, PostClass.parent)
        posts = posts.filter(parent_alias.creator_id == post_replies_to)
        ideaContentLinkQuery = ideaContentLinkQuery.join(
            parent_alias, PostClass.parent)
        ideaContentLinkQuery = ideaContentLinkQuery.filter(
            parent_alias.creator_id == post_replies_to)
    # Post read/unread management
    is_unread = request.GET.get('is_unread')
    translations = None
    if user_id != Everyone:
        # This is horrible, but the join creates complex subqueries that
        # virtuoso cannot decode properly.
        read_posts = {v.post_id for v in discussion.db.query(
            ViewPost).filter(
                ViewPost.tombstone_condition(),
                ViewPost.actor_id == user_id,
                *ViewPost.get_discussion_conditions(discussion_id))}
        liked_posts = {l.post_id: l.id for l in discussion.db.query(
            LikedPost).filter(
                LikedPost.tombstone_condition(),
                LikedPost.actor_id == user_id,
                *LikedPost.get_discussion_conditions(discussion_id))}
        if is_unread is not None:
            posts = posts.outerjoin(
                ViewPost, and_(
                    ViewPost.actor_id==user_id,
                    ViewPost.post_id==PostClass.id,
                    ViewPost.tombstone_date == None))
            if is_unread == "true":
                posts = posts.filter(ViewPost.id == None)
            elif is_unread == "false":
                posts = posts.filter(ViewPost.id != None)
        user = AgentProfile.get(user_id)
        service = discussion.translation_service()
        if service:
            translations = user_pref_as_translation_table(user, service)
    else:
        #If there is no user_id, all posts are always unread
        if is_unread == "false":
            raise HTTPBadRequest(localizer.translate(
                _("You must be logged in to view which posts are read")))

    if text_search is not None:
        # another Virtuoso bug: offband kills score. but it helps speed.
        offband = () if (order == 'score') else None
        posts = posts.filter(Post.body_text_index.contains(
            text_search.encode('utf-8'), offband=offband))
        ideaContentLinkQuery = ideaContentLinkQuery.filter(
            Post.body_text_index.contains(
                text_search.encode('utf-8'), offband=offband))

    # posts = posts.options(contains_eager(Post.source))
    # Horrible hack... But useful for structure load
    if view_def == 'id_only':
        pass  # posts = posts.options(defer(Post.body))
    else:
        posts = posts.options(
            # undefer(Post.idea_content_links_above_post),
            joinedload_all(Post.creator),
            joinedload_all(Post.extracts),
            joinedload_all(Post.widget_idea_links),
            joinedload_all(SynthesisPost.publishes_synthesis),
            subqueryload_all(Post.attachments))
        if len(discussion.discussion_locales) > 1:
            posts = posts.options(*Content.subqueryload_options())
        else:
            posts = posts.options(*Content.joinedload_options())
        ideaContentLinkCache = dict(ideaContentLinkQuery.all())

    if order == 'chronological':
        posts = posts.order_by(Content.creation_date)
    elif order == 'reverse_chronological':
        posts = posts.order_by(Content.creation_date.desc())
    elif order == 'score':
        posts = posts.order_by(Content.body_text_index.score_name.desc())
    else:
        posts = posts.order_by(Content.id)
    # print str(posts)

    no_of_posts = 0
    no_of_posts_viewed_by_user = 0

    for query_result in posts:
        score, viewpost, likedpost = None, None, None
        if not isinstance(query_result, (list, tuple)):
            query_result = [query_result]
        post = query_result[0]
        if user_id != Everyone:
            viewpost = post.id in read_posts
            likedpost = liked_posts.get(post.id, None)
            if view_def != "id_only":
                translate_content(
                    post, translation_table=translations, service=service)
        no_of_posts += 1
        serializable_post = post.generic_json(
            view_def, user_id, permissions) or {}
        if order == 'score':
            score = query_result[1]
            serializable_post['score'] = score

        if viewpost:
            serializable_post['read'] = True
            no_of_posts_viewed_by_user += 1
        elif user_id != Everyone and root_post is not None and root_post.id == post.id:
            # Mark post read, we requested it explicitly
            viewed_post = ViewPost(
                actor_id=user_id,
                post=root_post
                )
            discussion.db.add(viewed_post)
            serializable_post['read'] = True
        else:
            serializable_post['read'] = False
        # serializable_post['liked'] = likedpost.uri() if likedpost else False
        serializable_post['liked'] = (
            LikedPost.uri_generic(likedpost) if likedpost else False)
        if view_def != "id_only":
            serializable_post['indirect_idea_content_links'] = (
                post.indirect_idea_content_links_with_cache(
                    ideaContentLinkCache.get(post.id, None)))

        post_data.append(serializable_post)

    # Benoitg:  For now, this completely garbles threading without intelligent
    #handling of pagination.  Disabling
    #posts = posts.limit(page_size).offset(data['startIndex']-1)
    # This code isn't up to date.  If limiting the query by page, we need to 
    # calculate the counts with a separate query to have the right number of 
    # results
    #no_of_messages_viewed_by_user = discussion.db.query(ViewPost).join(
    #    Post
    #).filter(
    #    Post.discussion_id == discussion_id,
    #    ViewPost.actor_id == user_id,
    #).count() if user_id else 0

    data = {}
    data["page"] = page
    data["unread"] = no_of_posts - no_of_posts_viewed_by_user
    data["total"] = no_of_posts
    data["maxPage"] = max(1, ceil(float(data["total"])/page_size))
    #TODO:  Check if we want 1 based index in the api
    data["startIndex"] = (page_size * page) - (page_size-1)

    if data["page"] == data["maxPage"]:
        data["endIndex"] = data["total"]
    else:
        data["endIndex"] = data["startIndex"] + (page_size-1)
    data["posts"] = post_data

    return data
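
A quick worked check of the pagination arithmetic, assuming the default page size of 25:

page_size, page = 25, 2
start_index = (page_size * page) - (page_size - 1)  # 26 (1-based)
end_index = start_index + (page_size - 1)           # 50, unless on the last page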
Example #12
def get_posts(request):
    """
    Query interface on posts
    Filters have two forms:
    only_*, is for filters that cannot be reversed (ex: only_synthesis, only_orphan)
    is_*, is for filters that can be reversed (ex: is_unread=true returns only
     unread messages, is_unread=false returns only read messages)
    order: can be chronological, reverse_chronological, popularity
    root_post_id: all posts below the one specified.
    family_post_id: all posts below the one specified, and all its ancestors.
    post_reply_to: replies to a given post
    root_idea_id: all posts associated with the given idea
    ids: explicit message ids.
    posted_after_date, posted_before_date: date selection (ISO format)
    post_author: filter by author
    classifier: filter on message_classifier, or absence thereof (classifier=null). Can be negated with "!"
    """
    localizer = request.localizer
    discussion_id = int(request.matchdict['discussion_id'])
    discussion = Discussion.get(int(discussion_id))
    if not discussion:
        raise HTTPNotFound(
            localizer.translate(_("No discussion found with id=%s")) %
            discussion_id)

    discussion.import_from_sources()

    user_id = authenticated_userid(request) or Everyone
    permissions = get_permissions(user_id, discussion_id)

    DEFAULT_PAGE_SIZE = 25
    page_size = DEFAULT_PAGE_SIZE

    filter_names = [
        filter_name for filter_name in request.GET.getone('filters').split(',')
        if filter_name
    ] if request.GET.get('filters') else []

    try:
        page = int(request.GET.getone('page'))
    except (ValueError, KeyError):
        page = 1

    text_search = request.GET.get('text_search', None)

    order = request.GET.get('order')
    if order is None:
        order = 'chronological'
    assert order in ('chronological', 'reverse_chronological', 'score',
                     'popularity')
    if order == 'score':
        assert text_search is not None

    if page < 1:
        page = 1

    root_post_id = request.GET.getall('root_post_id')
    if root_post_id:
        root_post_id = get_database_id("Post", root_post_id[0])
    family_post_id = request.GET.getall('family_post_id')
    if family_post_id:
        family_post_id = get_database_id("Post", family_post_id[0])

    root_idea_id = request.GET.getall('root_idea_id')
    if root_idea_id:
        root_idea_id = get_database_id("Idea", root_idea_id[0])

    ids = request.GET.getall('ids[]')
    if ids:
        ids = [get_database_id("Post", id) for id in ids]

    view_def = request.GET.get('view') or 'default'

    only_synthesis = request.GET.get('only_synthesis')

    post_author_id = request.GET.get('post_author')
    if post_author_id:
        post_author_id = get_database_id("AgentProfile", post_author_id)
        assert AgentProfile.get(
            post_author_id
        ), "Unable to find agent profile with id " + str(post_author_id)

    post_replies_to = request.GET.get('post_replies_to')
    if post_replies_to:
        post_replies_to = get_database_id("AgentProfile", post_replies_to)
        assert AgentProfile.get(
            post_replies_to
        ), "Unable to find agent profile with id " + str(post_replies_to)

    posted_after_date = request.GET.get('posted_after_date')
    posted_before_date = request.GET.get('posted_before_date')
    message_classifiers = request.GET.getall('classifier')

    PostClass = SynthesisPost if only_synthesis == "true" else Post
    if order == 'score':
        posts = discussion.db.query(PostClass,
                                    Content.body_text_index.score_name)
    else:
        posts = discussion.db.query(PostClass)

    posts = posts.filter(PostClass.discussion_id == discussion_id)
    ##no_of_posts_to_discussion = posts.count()

    post_data = []

    # True means deleted only, False (default) means non-deleted only. None means both.

    # v0
    # deleted = request.GET.get('deleted', None)
    # end v0

    # v1: we would like something like that
    # deleted = request.GET.get('deleted', None)
    # if deleted is None:
    #     if view_def == 'id_only':
    #         deleted = None
    #     else:
    #         deleted = False
    # end v1

    # v2
    # deleted = request.GET.get('deleted', None)
    # if deleted is None:
    #     if not ids:
    #         deleted = False
    #     else:
    #         deleted = None
    #
    # if deleted == 'false':
    #     deleted = False
    #     posts = posts.filter(PostClass.tombstone_condition())
    # elif deleted == 'true':
    #     deleted = True
    #     posts = posts.filter(PostClass.not_tombstone_condition())
    # elif deleted == 'any':
    #     deleted = None
    #     # result will contain deleted and non-deleted posts
    #     pass
    # end v2

    # v3
    # deleted = request.GET.get('deleted', None)
    # if deleted is None:
    #     if not ids:
    #         deleted = False
    #     else:
    #         deleted = None

    # if deleted == 'true':
    #     deleted = True
    #     posts = posts.filter(PostClass.not_tombstone_condition())
    # end v3

    # v4
    deleted = request.GET.get('deleted', None)
    if deleted is None:
        if not ids:
            deleted = False
        else:
            deleted = None
    elif deleted.lower() == "any":
        deleted = None
    else:
        deleted = asbool(deleted)
    # if deleted is not in (False, True, None):
    #    deleted = False
    # end v4

    only_orphan = asbool(request.GET.get('only_orphan', False))
    if only_orphan:
        if root_idea_id:
            raise HTTPBadRequest(
                localizer.translate(
                    _("Getting orphan posts of a specific idea isn't supported."
                      )))
        orphans = Idea._get_orphan_posts_statement(
            discussion_id, True, include_deleted=deleted).subquery("orphans")
        posts = posts.join(orphans, PostClass.id == orphans.c.post_id)

    if root_idea_id:
        related = Idea.get_related_posts_query_c(discussion_id,
                                                 root_idea_id,
                                                 True,
                                                 include_deleted=deleted)
        posts = posts.join(related, PostClass.id == related.c.post_id)
    elif not only_orphan:
        if deleted is not None:
            if deleted:
                posts = posts.filter(
                    PostClass.publication_state.in_(
                        deleted_publication_states))
            else:
                posts = posts.filter(PostClass.tombstone_date == None)

    if root_post_id:
        root_post = Post.get(root_post_id)

        posts = posts.filter((Post.ancestry.like(root_post.ancestry +
                                                 cast(root_post.id, String) +
                                                 ',%'))
                             | (PostClass.id == root_post.id))
    elif family_post_id:
        root_post = Post.get(family_post_id)
        ancestor_ids = root_post.ancestor_ids()
        posts = posts.filter((Post.ancestry.like(root_post.ancestry +
                                                 cast(root_post.id, String) +
                                                 ',%'))
                             | (PostClass.id == root_post.id)
                             | (PostClass.id.in_(ancestor_ids)))
    else:
        root_post = None

    if ids:
        posts = posts.filter(Post.id.in_(ids))

    if posted_after_date:
        posted_after_date = parse_datetime(posted_after_date)
        if posted_after_date:
            posts = posts.filter(PostClass.creation_date >= posted_after_date)
        #Maybe we should do something if the date is invalid.  benoitg

    if posted_before_date:
        posted_before_date = parse_datetime(posted_before_date)
        if posted_before_date:
            posts = posts.filter(PostClass.creation_date <= posted_before_date)
        #Maybe we should do something if the date is invalid.  benoitg

    if post_author_id:
        posts = posts.filter(PostClass.creator_id == post_author_id)

    if message_classifiers:
        if any([len(classifier) == 0 for classifier in message_classifiers]):
            return {'total': 0, 'posts': []}
        polarities = [
            classifier[0] != "!" for classifier in message_classifiers
        ]
        polarity = all(polarities)
        if not polarity:
            message_classifiers = [c.strip("!") for c in message_classifiers]
        if polarity != any(polarities):
            raise HTTPBadRequest(
                _("Do not combine negative and positive classifiers"))
        # Treat null as no classifier
        includes_null = 'null' in message_classifiers
        if includes_null:
            message_classifiers_nonull = filter(lambda c: c != "null",
                                                message_classifiers)
        if polarity:
            if len(message_classifiers) == 1:
                term = PostClass.message_classifier == (
                    None if includes_null else message_classifiers[0])
            else:
                term = PostClass.message_classifier.in_(
                    message_classifiers_nonull)
                if includes_null:
                    term = term | (PostClass.message_classifier == None)
        else:
            if len(message_classifiers) == 1:
                term = PostClass.message_classifier != (
                    None if includes_null else message_classifiers[0])
            else:
                term = PostClass.message_classifier.notin_(
                    message_classifiers_nonull)
            if not includes_null:
                term = term | (PostClass.message_classifier == None)
        posts = posts.filter(term)

    if post_replies_to:
        parent_alias = aliased(PostClass)
        posts = posts.join(parent_alias, PostClass.parent)
        posts = posts.filter(parent_alias.creator_id == post_replies_to)
    # Post read/unread management
    is_unread = request.GET.get('is_unread')
    translations = None
    if user_id != Everyone:
        # This is horrible, but the join creates complex subqueries that
        # virtuoso cannot decode properly.
        read_posts = {
            v.post_id
            for v in discussion.db.query(ViewPost).filter(
                ViewPost.tombstone_condition(), ViewPost.actor_id == user_id,
                *ViewPost.get_discussion_conditions(discussion_id))
        }
        my_sentiments = {
            l.post_id: l
            for l in discussion.db.query(SentimentOfPost).filter(
                SentimentOfPost.tombstone_condition(), SentimentOfPost.actor_id
                == user_id,
                *SentimentOfPost.get_discussion_conditions(discussion_id))
        }
        if is_unread is not None:
            posts = posts.outerjoin(
                ViewPost,
                and_(ViewPost.actor_id == user_id,
                     ViewPost.post_id == PostClass.id,
                     ViewPost.tombstone_date == None))
            if is_unread == "true":
                posts = posts.filter(ViewPost.id == None)
            elif is_unread == "false":
                posts = posts.filter(ViewPost.id != None)
        user = AgentProfile.get(user_id)
        service = discussion.translation_service()
        if service:
            translations = PrefCollectionTranslationTable(
                service, LanguagePreferenceCollection.getCurrent(request))
    else:
        #If there is no user_id, all posts are always unread
        my_sentiments = {}
        if is_unread == "false":
            raise HTTPBadRequest(
                localizer.translate(
                    _("You must be logged in to view which posts are read")))

    if text_search is not None:
        # another Virtuoso bug: offband kills score. but it helps speed.
        offband = () if (order == 'score') else None
        posts = posts.filter(
            Post.body_text_index.contains(text_search.encode('utf-8'),
                                          offband=offband))

    # posts = posts.options(contains_eager(Post.source))
    # Horrible hack... But useful for structure load
    if view_def == 'id_only':
        pass  # posts = posts.options(defer(Post.body))
    else:
        ideaContentLinkQuery = posts.with_entities(
            PostClass.id, PostClass.idea_content_links_above_post)
        ideaContentLinkCache = dict(ideaContentLinkQuery.all())
        # Note: we could count the like the same way and kill the subquery.
        # But it interferes with the popularity order,
        # and the benefit is not that high.
        sentiment_counts = discussion.db.query(
            PostClass.id, SentimentOfPost.type,
            count(SentimentOfPost.id)).join(SentimentOfPost).filter(
                PostClass.id.in_(posts.with_entities(PostClass.id).subquery()),
                SentimentOfPost.tombstone_condition()).group_by(
                    PostClass.id, SentimentOfPost.type)
        sentiment_counts_by_post_id = defaultdict(dict)
        for (post_id, sentiment_type, sentiment_count) in sentiment_counts:
            sentiment_counts_by_post_id[post_id][sentiment_type[
                SentimentOfPost.TYPE_PREFIX_LEN:]] = sentiment_count
        posts = posts.options(
            # undefer(Post.idea_content_links_above_post),
            joinedload_all(Post.creator),
            joinedload_all(Post.extracts),
            joinedload_all(Post.widget_idea_links),
            joinedload_all(SynthesisPost.publishes_synthesis),
            subqueryload_all(Post.attachments))
        if len(discussion.discussion_locales) > 1:
            posts = posts.options(*Content.subqueryload_options())
        else:
            posts = posts.options(*Content.joinedload_options())

    if order == 'chronological':
        posts = posts.order_by(Content.creation_date)
    elif order == 'reverse_chronological':
        posts = posts.order_by(Content.creation_date.desc())
    elif order == 'score':
        posts = posts.order_by(Content.body_text_index.score_name.desc())
    elif order == 'popularity':
        # assume reverse chronological otherwise
        posts = posts.order_by(Content.disagree_count - Content.like_count,
                               Content.creation_date.desc())
    else:
        posts = posts.order_by(Content.id)
    # print str(posts)

    no_of_posts = 0
    no_of_posts_viewed_by_user = 0

    if deleted is True:
        # We just got deleted posts, now we want their ancestors for context
        post_ids = set()
        ancestor_ids = set()

        def add_ancestors(post):
            post_ids.add(post.id)
            ancestor_ids.update(
                [int(x) for x in post.ancestry.strip(",").split(",") if x])

        posts = list(posts)
        for post in posts:
            add_ancestors(post)
        ancestor_ids -= post_ids
        if ancestor_ids:
            ancestors = discussion.db.query(PostClass).filter(
                PostClass.id.in_(ancestor_ids))
            if view_def == 'id_only':
                pass  # ancestors = ancestors.options(defer(Post.body))
            else:
                ancestors = ancestors.options(
                    # undefer(Post.idea_content_links_above_post),
                    joinedload_all(Post.creator),
                    joinedload_all(Post.extracts),
                    joinedload_all(Post.widget_idea_links),
                    joinedload_all(SynthesisPost.publishes_synthesis),
                    subqueryload_all(Post.attachments))
                if len(discussion.discussion_locales) > 1:
                    ancestors = ancestors.options(
                        *Content.subqueryload_options())
                else:
                    ancestors = ancestors.options(
                        *Content.joinedload_options())
            posts.extend(ancestors.all())

    for query_result in posts:
        score, viewpost = None, None
        if not isinstance(query_result, (list, tuple)):
            query_result = [query_result]
        post = query_result[0]
        if deleted is True:
            add_ancestors(post)

        if user_id != Everyone:
            viewpost = post.id in read_posts
            if view_def != "id_only":
                translate_content(post,
                                  translation_table=translations,
                                  service=service)
        no_of_posts += 1
        serializable_post = post.generic_json(view_def, user_id,
                                              permissions) or {}
        if order == 'score':
            score = query_result[1]
            serializable_post['score'] = score

        if viewpost:
            serializable_post['read'] = True
            no_of_posts_viewed_by_user += 1
        elif user_id != Everyone and root_post is not None and root_post.id == post.id:
            # Mark post read, we requested it explicitly
            viewed_post = ViewPost(actor_id=user_id, post=root_post)
            discussion.db.add(viewed_post)
            serializable_post['read'] = True
        else:
            serializable_post['read'] = False
        my_sentiment = my_sentiments.get(post.id, None)
        if my_sentiment is not None:
            my_sentiment = my_sentiment.generic_json('default', user_id,
                                                     permissions)
        serializable_post['my_sentiment'] = my_sentiment
        if view_def != "id_only":
            serializable_post['indirect_idea_content_links'] = (
                post.indirect_idea_content_links_with_cache(
                    ideaContentLinkCache.get(post.id, None)))
            serializable_post[
                'sentiment_counts'] = sentiment_counts_by_post_id[post.id]

        post_data.append(serializable_post)

    # Benoitg:  For now, this completely garbles threading without intelligent
    #handling of pagination.  Disabling
    #posts = posts.limit(page_size).offset(data['startIndex']-1)
    # This code isn't up to date.  If limiting the query by page, we need to
    # calculate the counts with a separate query to have the right number of
    # results
    #no_of_messages_viewed_by_user = discussion.db.query(ViewPost).join(
    #    Post
    #).filter(
    #    Post.discussion_id == discussion_id,
    #    ViewPost.actor_id == user_id,
    #).count() if user_id else 0

    data = {}
    data["page"] = page
    data["unread"] = no_of_posts - no_of_posts_viewed_by_user
    data["total"] = no_of_posts
    data["maxPage"] = max(1, ceil(float(data["total"]) / page_size))
    #TODO:  Check if we want 1 based index in the api
    data["startIndex"] = (page_size * page) - (page_size - 1)

    if data["page"] == data["maxPage"]:
        data["endIndex"] = data["total"]
    else:
        data["endIndex"] = data["startIndex"] + (page_size - 1)
    data["posts"] = post_data

    return data
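
The classifier filter accepts positive and negated forms plus the literal null; sample query strings matching the logic above (endpoint path hypothetical):

# Posts whose message_classifier is "question" or "proposal":
#   GET .../posts?classifier=question&classifier=proposal
# Posts with no classifier at all:
#   GET .../posts?classifier=null
# Posts whose classifier is anything but "question" ("!"-negated):
#   GET .../posts?classifier=!question
# Mixing negated and plain classifiers raises HTTPBadRequest.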
Example #13
def get_time_series_analytics(request):
    import isodate
    from datetime import datetime
    from assembl.lib.parsedatetime import parse_datetime
    start = request.GET.get("start", None)
    end = request.GET.get("end", None)
    interval = request.GET.get("interval", None)
    discussion = request.context._instance
    user_id = authenticated_userid(request) or Everyone
    try:
        if start:
            start = parse_datetime(start)
        if end:
            end = parse_datetime(end)
        if interval:
            interval = isodate.parse_duration(interval)
    except isodate.ISO8601Error as e:
        raise HTTPBadRequest(e)
    if interval and not start:
        raise HTTPBadRequest("You cannot define an interval and no start")
    if interval and not end:
        end = datetime.now()
    results = []

    from sqlalchemy import (Table, MetaData, and_, case, cast, Float,
                            Column, Integer, DateTime, func, distinct)
    from sqlalchemy.orm import aliased
    from sqlalchemy.exc import ProgrammingError
    import pprint
    import transaction
    with transaction.manager:
        # make sure we are using the same connection
        metadata = MetaData(discussion.db.get_bind())

        intervals_table = Table(
            'temp_table_intervals_' + str(user_id),
            metadata,
            Column('interval_id', Integer, primary_key=True),
            Column('interval_start', DateTime, nullable=False),
            Column('interval_end', DateTime, nullable=False),
            prefixes=None if discussion.using_virtuoso else ['TEMPORARY'])
        try:
            # In case there is a leftover from a previous crash
            intervals_table.drop()
        except ProgrammingError:
            pass
        intervals_table.create()
        interval_start = start
        intervals = []
        if interval:
            while interval_start < end:
                interval_end = min(interval_start + interval, end)
                intervals.append({
                    'interval_start': interval_start,
                    'interval_end': interval_end
                })
                interval_start = interval_start + interval
            #pprint.pprint(intervals)
            discussion.db.execute(intervals_table.insert(), intervals)
        else:
            raise HTTPBadRequest("Please specify an interval")

        from assembl.models import Post, AgentProfile, AgentStatusInDiscussion, ViewPost

        # The posters
        post_subquery = discussion.db.query(
            intervals_table.c.interval_id,
            func.count(distinct(Post.id)).label('count_posts'),
            func.count(distinct(Post.creator_id)).label('count_post_authors'),
            # func.DB.DBA.BAG_AGG(Post.creator_id).label('post_authors'),
            # func.DB.DBA.BAG_AGG(Post.id).label('post_ids'),
        )
        post_subquery = post_subquery.outerjoin(
            Post,
            and_(Post.creation_date >= intervals_table.c.interval_start,
                 Post.creation_date < intervals_table.c.interval_end,
                 Post.discussion_id == discussion.id))
        post_subquery = post_subquery.group_by(intervals_table.c.interval_id)
        post_subquery = post_subquery.subquery()

        # The cumulative posters
        cumulative_posts_aliased = aliased(Post)
        cumulative_posts_subquery = discussion.db.query(
            intervals_table.c.interval_id,
            func.count(distinct(
                cumulative_posts_aliased.id)).label('count_cumulative_posts'),
            func.count(distinct(cumulative_posts_aliased.creator_id)).label(
                'count_cumulative_post_authors')
            # func.DB.DBA.BAG_AGG(cumulative_posts_aliased.id).label('cumulative_post_ids')
        )
        cumulative_posts_subquery = cumulative_posts_subquery.outerjoin(
            cumulative_posts_aliased,
            and_(
                cumulative_posts_aliased.creation_date <
                intervals_table.c.interval_end,
                cumulative_posts_aliased.discussion_id == discussion.id))
        cumulative_posts_subquery = cumulative_posts_subquery.group_by(
            intervals_table.c.interval_id)
        cumulative_posts_subquery = cumulative_posts_subquery.subquery()

        # The post viewers
        postViewers = aliased(ViewPost)
        viewedPosts = aliased(Post)
        post_viewers_subquery = discussion.db.query(
            intervals_table.c.interval_id,
            func.count(distinct(
                postViewers.actor_id)).label('UNRELIABLE_count_post_viewers'))
        post_viewers_subquery = post_viewers_subquery.outerjoin(
            postViewers,
            and_(postViewers.creation_date >= intervals_table.c.interval_start,
                 postViewers.creation_date < intervals_table.c.interval_end)
        ).join(
            viewedPosts,
            and_(postViewers.post_id == viewedPosts.id,
                 viewedPosts.discussion_id == discussion.id))
        post_viewers_subquery = post_viewers_subquery.group_by(
            intervals_table.c.interval_id)
        post_viewers_subquery = post_viewers_subquery.subquery()

        # The visitors
        firstTimeVisitorAgent = aliased(AgentStatusInDiscussion)
        visitors_subquery = discussion.db.query(
            intervals_table.c.interval_id,
            func.count(firstTimeVisitorAgent.id).label(
                'count_first_time_logged_in_visitors'),
            # func.DB.DBA.BAG_AGG(firstTimeVisitorAgent.id).label('first_time_visitors')
        )
        visitors_subquery = visitors_subquery.outerjoin(
            firstTimeVisitorAgent,
            and_(
                firstTimeVisitorAgent.first_visit >=
                intervals_table.c.interval_start,
                firstTimeVisitorAgent.first_visit <
                intervals_table.c.interval_end,
                firstTimeVisitorAgent.discussion_id == discussion.id))
        visitors_subquery = visitors_subquery.group_by(
            intervals_table.c.interval_id)
        visitors_subquery = visitors_subquery.subquery()

        # The cumulative visitors
        cumulativeVisitorAgent = aliased(AgentStatusInDiscussion)
        cumulative_visitors_query = discussion.db.query(
            intervals_table.c.interval_id,
            func.count(distinct(cumulativeVisitorAgent.id)).label(
                'count_cumulative_logged_in_visitors'),
            # func.DB.DBA.BAG_AGG(cumulativeVisitorAgent.id).label('first_time_visitors')
        )
        cumulative_visitors_query = cumulative_visitors_query.outerjoin(
            cumulativeVisitorAgent,
            and_(
                cumulativeVisitorAgent.first_visit <
                intervals_table.c.interval_end,
                cumulativeVisitorAgent.discussion_id == discussion.id))
        cumulative_visitors_query = cumulative_visitors_query.group_by(
            intervals_table.c.interval_id)
        cumulative_visitors_subquery = cumulative_visitors_query.subquery()
        # query = cumulative_visitors_query

        # The members (can go up and down...)  Assumes that first_subscribed is available
        commented_out = """ first_subscribed isn't yet filled in by assembl
        memberAgentStatus = aliased(AgentStatusInDiscussion)
        members_subquery = discussion.db.query(intervals_table.c.interval_id,
            func.count(memberAgentStatus.id).label('count_approximate_members')
            )
        members_subquery = members_subquery.outerjoin(memberAgentStatus, ((memberAgentStatus.last_unsubscribed >= intervals_table.c.interval_end) | (memberAgentStatus.last_unsubscribed.is_(None))) & ((memberAgentStatus.first_subscribed < intervals_table.c.interval_end) | (memberAgentStatus.first_subscribed.is_(None))) & (memberAgentStatus.discussion_id==discussion.id))
        members_subquery = members_subquery.group_by(intervals_table.c.interval_id)
        query = members_subquery
        members_subquery = members_subquery.subquery()
        """

        subscribersAgentStatus = aliased(AgentStatusInDiscussion)
        subscribers_query = discussion.db.query(
            intervals_table.c.interval_id,
            func.sum(
                case([(subscribersAgentStatus.last_visit == None, 0),
                      (and_(
                          subscribersAgentStatus.last_visit <
                          intervals_table.c.interval_end,
                          subscribersAgentStatus.last_visit >=
                          intervals_table.c.interval_start), 1)],
                     else_=0)).label('retention_count_last_visit_in_period'),
            func.sum(
                case(
                    [(subscribersAgentStatus.first_visit == None, 0),
                     (and_(
                         subscribersAgentStatus.first_visit <
                         intervals_table.c.interval_end,
                         subscribersAgentStatus.first_visit >=
                         intervals_table.c.interval_start), 1)],
                    else_=0)).label('recruitment_count_first_visit_in_period'),
            func.sum(
                case([(subscribersAgentStatus.first_subscribed == None, 0),
                      (and_(
                          subscribersAgentStatus.first_subscribed <
                          intervals_table.c.interval_end,
                          subscribersAgentStatus.first_subscribed >=
                          intervals_table.c.interval_start), 1)],
                     else_=0)).
            label('UNRELIABLE_recruitment_count_first_subscribed_in_period'),
            func.sum(
                case([(subscribersAgentStatus.last_unsubscribed == None, 0),
                      (and_(
                          subscribersAgentStatus.last_unsubscribed <
                          intervals_table.c.interval_end,
                          subscribersAgentStatus.last_unsubscribed >=
                          intervals_table.c.interval_start), 1)],
                     else_=0)
            ).label('UNRELIABLE_retention_count_first_subscribed_in_period'),
        )
        subscribers_query = subscribers_query.outerjoin(
            subscribersAgentStatus,
            subscribersAgentStatus.discussion_id == discussion.id)
        subscribers_query = subscribers_query.group_by(
            intervals_table.c.interval_id)
        subscribers_subquery = subscribers_query.subquery()
        #query = subscribers_query

        combined_query = discussion.db.query(
            intervals_table,
            post_subquery,
            cumulative_posts_subquery,
            post_viewers_subquery,
            visitors_subquery,
            cumulative_visitors_subquery,
            case([
                (cumulative_posts_subquery.c.count_cumulative_post_authors ==
                 0, None),
                (cumulative_posts_subquery.c.count_cumulative_post_authors !=
                 0, (cast(post_subquery.c.count_post_authors, Float) / cast(
                     cumulative_posts_subquery.c.count_cumulative_post_authors,
                     Float)))
            ]).label('fraction_cumulative_authors_who_posted_in_period'),
            case([
                (cumulative_visitors_subquery.c.
                 count_cumulative_logged_in_visitors == 0, None),
                (cumulative_visitors_subquery.c.
                 count_cumulative_logged_in_visitors != 0,
                 (cast(post_subquery.c.count_post_authors, Float) / cast(
                     cumulative_visitors_subquery.c.
                     count_cumulative_logged_in_visitors, Float)))
            ]).label(
                'fraction_cumulative_logged_in_visitors_who_posted_in_period'),
            subscribers_subquery,
        )
        combined_query = combined_query.join(
            post_subquery,
            post_subquery.c.interval_id == intervals_table.c.interval_id)
        combined_query = combined_query.join(
            post_viewers_subquery, post_viewers_subquery.c.interval_id ==
            intervals_table.c.interval_id)
        combined_query = combined_query.join(
            visitors_subquery,
            visitors_subquery.c.interval_id == intervals_table.c.interval_id)
        combined_query = combined_query.join(
            cumulative_visitors_subquery,
            cumulative_visitors_subquery.c.interval_id ==
            intervals_table.c.interval_id)
        # combined_query = combined_query.join(members_subquery, members_subquery.c.interval_id==intervals_table.c.interval_id)
        combined_query = combined_query.join(
            subscribers_subquery, subscribers_subquery.c.interval_id ==
            intervals_table.c.interval_id)
        combined_query = combined_query.join(
            cumulative_posts_subquery, cumulative_posts_subquery.c.interval_id
            == intervals_table.c.interval_id)

        query = combined_query
        query = query.order_by(intervals_table.c.interval_id)
        results = query.all()

        # pprint.pprint(results)
        # end of transaction

    intervals_table.drop()
    if not (request.GET.get('format', None) == 'csv'
            or request.accept == 'text/csv'):
        # json default
        from assembl.lib.json import DateJSONEncoder
        return Response(json.dumps(results, cls=DateJSONEncoder),
                        content_type='application/json')

    fieldnames = [
        "interval_id",
        "interval_start",
        "interval_end",
        "count_first_time_logged_in_visitors",
        "count_cumulative_logged_in_visitors",
        "fraction_cumulative_logged_in_visitors_who_posted_in_period",
        "count_post_authors",
        "count_cumulative_post_authors",
        "fraction_cumulative_authors_who_posted_in_period",
        "count_posts",
        "count_cumulative_posts",
        "recruitment_count_first_visit_in_period",
        "UNRELIABLE_recruitment_count_first_subscribed_in_period",
        "retention_count_last_visit_in_period",
        "UNRELIABLE_retention_count_first_subscribed_in_period",
        "UNRELIABLE_count_post_viewers",
    ]
    # otherwise assume csv
    return csv_response(fieldnames, [r._asdict() for r in results])
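A note on consuming the CSV produced above: each row is one interval, and the
fraction columns can be recomputed from the count columns. A minimal Python 3
sketch (the sample rows are invented for illustration, not real output):

import csv
from io import StringIO

# Invented sample limited to three of the fieldnames listed above.
SAMPLE = """interval_id,count_post_authors,count_cumulative_post_authors
1,3,10
2,5,12
"""

for row in csv.DictReader(StringIO(SAMPLE)):
    authors = int(row["count_post_authors"])
    cumulative = int(row["count_cumulative_post_authors"])
    # Mirrors fraction_cumulative_authors_who_posted_in_period: None when the
    # cumulative author count is zero, a float ratio otherwise.
    fraction = None if cumulative == 0 else authors / cumulative
    print(row["interval_id"], fraction)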
Example #14
0
def get_posts(request):
    """
    Query interface on posts
    Filters have two forms:
    only_*, is for filters that cannot be reversed (ex: only_synthesis)
    is_*, is for filters that can be reversed (ex:is_unread=true returns only unread
    order can be chronological, reverse_chronological
    message, is_unread=false returns only read messages)
    """
    localizer = request.localizer
    discussion_id = int(request.matchdict['discussion_id'])
    discussion = Discussion.get(int(discussion_id))
    if not discussion:
        raise HTTPNotFound(
            localizer.translate(_("No discussion found with id=%s")) %
            discussion_id)

    discussion.import_from_sources()

    user_id = authenticated_userid(request) or Everyone
    permissions = get_permissions(user_id, discussion_id)

    DEFAULT_PAGE_SIZE = 25
    page_size = DEFAULT_PAGE_SIZE

    filter_names = [
        filter_name for filter_name
        in request.GET.getone('filters').split(',')
        if filter_name
    ] if request.GET.get('filters') else []

    try:
        page = int(request.GET.getone('page'))
    except (ValueError, KeyError):
        page = 1

    text_search = request.GET.get('text_search', None)

    order = request.GET.get('order')
    if order is None:
        order = 'chronological'
    assert order in ('chronological', 'reverse_chronological', 'score')
    if order == 'score':
        assert text_search is not None

    if page < 1:
        page = 1

    root_post_id = request.GET.getall('root_post_id')
    if root_post_id:
        root_post_id = get_database_id("Post", root_post_id[0])
    family_post_id = request.GET.getall('family_post_id')
    if family_post_id:
        family_post_id = get_database_id("Post", family_post_id[0])

    root_idea_id = request.GET.getall('root_idea_id')
    if root_idea_id:
        root_idea_id = get_database_id("Idea", root_idea_id[0])

    ids = request.GET.getall('ids[]')
    if ids:
        ids = [get_database_id("Post", id) for id in ids]

    view_def = request.GET.get('view') or 'default'

    only_synthesis = request.GET.get('only_synthesis')

    post_author_id = request.GET.get('post_author')
    if post_author_id:
        post_author_id = get_database_id("AgentProfile", post_author_id)
        assert AgentProfile.get(
            post_author_id
        ), "Unable to find agent profile with id " + str(post_author_id)

    post_replies_to = request.GET.get('post_replies_to')
    if post_replies_to:
        post_replies_to = get_database_id("AgentProfile", post_replies_to)
        assert AgentProfile.get(
            post_replies_to
        ), "Unable to find agent profile with id " + str(post_replies_to)

    posted_after_date = request.GET.get('posted_after_date')
    posted_before_date = request.GET.get('posted_before_date')

    PostClass = SynthesisPost if only_synthesis == "true" else Post
    if order == 'score':
        posts = discussion.db.query(PostClass,
                                    Content.body_text_index.score_name)
    else:
        posts = discussion.db.query(PostClass)

    posts = posts.filter(PostClass.discussion_id == discussion_id)
    ##no_of_posts_to_discussion = posts.count()

    post_data = []

    only_orphan = request.GET.get('only_orphan')
    if only_orphan == "true":
        if root_idea_id:
            raise HTTPBadRequest(
                localizer.translate(
                    _("Getting orphan posts of a specific idea isn't supported."
                      )))
        orphans = text(Idea._get_orphan_posts_statement(),
                       bindparams=[
                           bindparam('discussion_id', discussion_id)
                       ]).columns(column('post_id')).alias('orphans')
        posts = posts.join(orphans, PostClass.id == orphans.c.post_id)
    elif only_orphan == "false":
        raise HTTPBadRequest(
            localizer.translate(
                _("Getting non-orphan posts isn't supported.")))

    # "true" means hidden only, "false" (default) means visible only. "any" means both.
    hidden = request.GET.get('hidden_messages', "false")
    if hidden != 'any':
        posts = posts.filter(PostClass.hidden == asbool(hidden))

    if root_idea_id:
        related = text(Idea._get_related_posts_statement(),
                       bindparams=[
                           bindparam('root_idea_id', root_idea_id),
                           bindparam('discussion_id', discussion_id)
                       ]).columns(column('post_id')).alias('related')
        #Virtuoso bug: This should work...
        #posts = posts.join(related, PostClass.id==related.c.post_id)
        posts = posts.join(related, PostClass.id == related.c.post_id)
    if root_post_id:
        root_post = Post.get(root_post_id)

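        # Descendants carry the root's ancestry string plus the root's own id
        # as a prefix; the LIKE below matches that prefix, and the OR keeps
        # the root post itself in the result.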
        posts = posts.filter((Post.ancestry.like(root_post.ancestry +
                                                 cast(root_post.id, String) +
                                                 ',%'))
                             | (PostClass.id == root_post.id))
    elif family_post_id:
        root_post = Post.get(family_post_id)
        ancestor_ids = root_post.ancestor_ids()
        posts = posts.filter((Post.ancestry.like(root_post.ancestry +
                                                 cast(root_post.id, String) +
                                                 ',%'))
                             | (PostClass.id == root_post.id)
                             | (PostClass.id.in_(ancestor_ids)))
    else:
        root_post = None

    if ids:
        posts = posts.filter(Post.id.in_(ids))

    if posted_after_date:
        posted_after_date = parse_datetime(posted_after_date)
        if posted_after_date:
            posts = posts.filter(PostClass.creation_date >= posted_after_date)
        #Maybe we should do something if the date is invalid.  benoitg

    if posted_before_date:
        posted_before_date = parse_datetime(posted_before_date)
        if posted_before_date:
            posts = posts.filter(PostClass.creation_date <= posted_before_date)
        #Maybe we should do something if the date is invalid.  benoitg

    if post_author_id:
        posts = posts.filter(PostClass.creator_id == post_author_id)

    if post_replies_to:
        parent_alias = aliased(PostClass)
        posts = posts.join(parent_alias, PostClass.parent)
        posts = posts.filter(parent_alias.creator_id == post_replies_to)
    # Post read/unread management
    is_unread = request.GET.get('is_unread')
    if user_id != Everyone:
        # This is horrible, but the join creates complex subqueries that
        # virtuoso cannot decode properly.
        read_posts = {
            v.post_id
            for v in discussion.db.query(ViewPost).filter(
                ViewPost.tombstone_condition(), ViewPost.actor_id == user_id,
                *ViewPost.get_discussion_conditions(discussion_id))
        }
        liked_posts = {
            l.post_id: l.id
            for l in discussion.db.query(LikedPost).filter(
                LikedPost.tombstone_condition(), LikedPost.actor_id == user_id,
                *LikedPost.get_discussion_conditions(discussion_id))
        }
        if is_unread is not None:
            posts = posts.outerjoin(
                ViewPost,
                and_(ViewPost.actor_id == user_id,
                     ViewPost.post_id == PostClass.id,
                     ViewPost.tombstone_date == None))
            if is_unread == "true":
                posts = posts.filter(ViewPost.id == None)
            elif is_unread == "false":
                posts = posts.filter(ViewPost.id != None)
    else:
        #If there is no user_id, all posts are always unread
        if is_unread == "false":
            raise HTTPBadRequest(
                localizer.translate(
                    _("You must be logged in to view which posts are read")))

    if text_search is not None:
        # another Virtuoso bug: offband kills score. but it helps speed.
        offband = () if (order == 'score') else None
        posts = posts.filter(
            Post.body_text_index.contains(text_search.encode('utf-8'),
                                          offband=offband))

    #posts = posts.options(contains_eager(Post.source))
    # Horrible hack... But useful for structure load
    if view_def == 'id_only':
        pass  # posts = posts.options(defer(Post.body))
    else:
        posts = posts.options(joinedload_all(Post.creator))
        posts = posts.options(joinedload_all(Post.extracts))
        posts = posts.options(joinedload_all(Post.widget_idea_links))
        posts = posts.options(joinedload_all(
            SynthesisPost.publishes_synthesis))

    if order == 'chronological':
        posts = posts.order_by(Content.creation_date)
    elif order == 'reverse_chronological':
        posts = posts.order_by(Content.creation_date.desc())
    elif order == 'score':
        posts = posts.order_by(Content.body_text_index.score_name.desc())
    # print str(posts)

    no_of_posts = 0
    no_of_posts_viewed_by_user = 0

    for query_result in posts:
        score, viewpost, likedpost = None, None, None
        if not isinstance(query_result, (list, tuple)):
            query_result = [query_result]
        post = query_result[0]
        if user_id != Everyone:
            viewpost = post.id in read_posts
            likedpost = liked_posts.get(post.id, None)
        no_of_posts += 1
        serializable_post = post.generic_json(view_def, user_id,
                                              permissions) or {}
        if order == 'score':
            score = query_result[1]
            serializable_post['score'] = score

        if viewpost:
            serializable_post['read'] = True
            no_of_posts_viewed_by_user += 1
        elif user_id != Everyone and root_post is not None and root_post.id == post.id:
            # Mark post read, we requested it explicitely
            viewed_post = ViewPost(actor_id=user_id, post=root_post)
            discussion.db.add(viewed_post)
            serializable_post['read'] = True
        else:
            serializable_post['read'] = False
        # serializable_post['liked'] = likedpost.uri() if likedpost else False
        serializable_post['liked'] = LikedPost.uri_generic(
            likedpost) if likedpost else False

        post_data.append(serializable_post)

    # Benoitg:  For now, this completely garbles threading without intelligent
    #handling of pagination.  Disabling
    #posts = posts.limit(page_size).offset(data['startIndex']-1)
    # This code isn't up to date.  If limiting the query by page, we need to
    # calculate the counts with a separate query to have the right number of
    # results
    #no_of_messages_viewed_by_user = discussion.db.query(ViewPost).join(
    #    Post
    #).filter(
    #    Post.discussion_id == discussion_id,
    #    ViewPost.actor_id == user_id,
    #).count() if user_id else 0

    data = {}
    data["page"] = page
    data["unread"] = no_of_posts - no_of_posts_viewed_by_user
    data["total"] = no_of_posts
    data["maxPage"] = max(1, ceil(float(data["total"]) / page_size))
    #TODO:  Check if we want 1 based index in the api
    data["startIndex"] = (page_size * page) - (page_size - 1)

    if data["page"] == data["maxPage"]:
        data["endIndex"] = data["total"]
    else:
        data["endIndex"] = data["startIndex"] + (page_size - 1)
    data["posts"] = post_data

    return data
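The paging fields in the dict returned above follow simple 1-based arithmetic.
A standalone sketch of the same computation with worked checks (page_size of
25 matches DEFAULT_PAGE_SIZE above):

from math import ceil

def page_bounds(page, total, page_size=25):
    # Mirrors get_posts: pages are 1-based, start/end indices are inclusive,
    # and the last page's endIndex is clipped to the total.
    max_page = max(1, int(ceil(float(total) / page_size)))
    page = max(page, 1)
    start = (page_size * page) - (page_size - 1)
    end = total if page == max_page else start + (page_size - 1)
    return start, end

assert page_bounds(1, 60) == (1, 25)
assert page_bounds(3, 60) == (51, 60)  # last page clipped to the total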
Example #15
0
File: post.py Project: cimadure/idealoom
def get_posts(request):
    """
    Query interface on posts
    Filters have two forms:
    only_*, is for filters that cannot be reversed (ex: only_synthesis, only_orphan)
    is_*, is for filters that can be reversed (ex:is_unread=true returns only unread
     message, is_unread=false returns only read messages)
    order: can be chronological, reverse_chronological, popularity
    root_post_id: all posts below the one specified.
    family_post_id: all posts below the one specified, and all its ancestors.
    post_replies_to: replies to a given post
    root_idea_id: all posts associated with the given idea
    ids: explicit message ids.
    posted_after_date, posted_before_date: date selection (ISO format)
    post_author: filter by author
    keyword: use full-text search
    locale: restrict to locale
    """
    localizer = request.localizer
    discussion = request.context

    discussion.import_from_sources()

    user_id = authenticated_userid(request) or Everyone
    permissions = request.permissions

    DEFAULT_PAGE_SIZE = 25
    page_size = DEFAULT_PAGE_SIZE

    filter_names = [
        filter_name for filter_name in request.GET.getone('filters').split(',')
        if filter_name
    ] if request.GET.get('filters') else []

    try:
        page = int(request.GET.getone('page'))
    except (ValueError, KeyError):
        page = 1

    keywords = request.GET.getall('keyword')

    order = request.GET.get('order')
    if order is None:
        order = 'chronological'
    assert order in ('chronological', 'reverse_chronological', 'score',
                     'popularity')
    if order == 'score' and not keywords:
        raise HTTPBadRequest("Cannot ask for a score without keywords")

    if page < 1:
        page = 1

    root_post_id = request.GET.getall('root_post_id')
    if root_post_id:
        root_post_id = Post.get_database_id(root_post_id[0])
    family_post_id = request.GET.getall('family_post_id')
    if family_post_id:
        family_post_id = Post.get_database_id(family_post_id[0])

    root_idea_id = request.GET.getall('root_idea_id')
    if root_idea_id:
        root_idea_id = Idea.get_database_id(root_idea_id[0])

    ids = request.GET.getall('ids[]')
    if ids:
        ids = [Post.get_database_id(id) for id in ids]

    view_def = request.GET.get('view') or 'default'

    only_synthesis = request.GET.get('only_synthesis')

    post_author_id = request.GET.get('post_author')
    if post_author_id:
        post_author_id = AgentProfile.get_database_id(post_author_id)
        assert AgentProfile.get(
            post_author_id
        ), "Unable to find agent profile with id " + str(post_author_id)

    post_replies_to = request.GET.get('post_replies_to')
    if post_replies_to:
        post_replies_to = AgentProfile.get_database_id(post_replies_to)
        assert AgentProfile.get(
            post_replies_to
        ), "Unable to find agent profile with id " + str(post_replies_to)

    posted_after_date = request.GET.get('posted_after_date')
    posted_before_date = request.GET.get('posted_before_date')

    PostClass = SynthesisPost if only_synthesis == "true" else Post
    posts = discussion.db.query(PostClass)

    posts = posts.filter(PostClass.discussion == discussion)
    ##no_of_posts_to_discussion = posts.count()

    post_data = []

    # True means deleted only, False (default) means non-deleted only. None means both.

    deleted = request.GET.get('deleted', None)
    if deleted is None:
        if not ids:
            deleted = False
        else:
            deleted = None
    elif deleted.lower() == "any":
        deleted = None
    else:
        deleted = asbool(deleted)
    # if deleted is not in (False, True, None):
    #    deleted = False
    # end v4

    only_orphan = asbool(request.GET.get('only_orphan', False))
    if only_orphan:
        if root_idea_id:
            raise HTTPBadRequest(
                localizer.translate(
                    _("Getting orphan posts of a specific idea isn't supported."
                      )))
        orphans = Idea._get_orphan_posts_statement(
            discussion.id, True, include_deleted=deleted).subquery("orphans")
        posts = posts.join(orphans, PostClass.id == orphans.c.post_id)

    if root_idea_id:
        related = Idea.get_related_posts_query_c(discussion.id,
                                                 root_idea_id,
                                                 True,
                                                 include_deleted=deleted)
        posts = posts.join(related, PostClass.id == related.c.post_id)
    elif not only_orphan:
        if deleted is not None:
            if deleted:
                posts = posts.filter(
                    PostClass.publication_state.in_(
                        deleted_publication_states))
            else:
                posts = posts.filter(PostClass.tombstone_date == None)

    if root_post_id:
        root_post = Post.get(root_post_id)

        posts = posts.filter((Post.ancestry.like(root_post.ancestry +
                                                 cast(root_post.id, String) +
                                                 ',%'))
                             | (PostClass.id == root_post.id))
    elif family_post_id:
        root_post = Post.get(family_post_id)
        ancestor_ids = root_post.ancestor_ids()
        posts = posts.filter((Post.ancestry.like(root_post.ancestry +
                                                 cast(root_post.id, String) +
                                                 ',%'))
                             | (PostClass.id == root_post.id)
                             | (PostClass.id.in_(ancestor_ids)))
    else:
        root_post = None

    if ids:
        posts = posts.filter(Post.id.in_(ids))

    if posted_after_date:
        posted_after_date = parse_datetime(posted_after_date)
        if posted_after_date:
            posts = posts.filter(PostClass.creation_date >= posted_after_date)
        #Maybe we should do something if the date is invalid.  benoitg

    if posted_before_date:
        posted_before_date = parse_datetime(posted_before_date)
        if posted_before_date:
            posts = posts.filter(PostClass.creation_date <= posted_before_date)
        #Maybe we should do something if the date is invalid.  benoitg

    if post_author_id:
        posts = posts.filter(PostClass.creator_id == post_author_id)

    if post_replies_to:
        parent_alias = aliased(PostClass)
        posts = posts.join(parent_alias, PostClass.parent)
        posts = posts.filter(parent_alias.creator_id == post_replies_to)

    if keywords:
        locales = request.GET.getall('locale')
        posts, rank = add_text_search(posts, (PostClass.body_id, ), keywords,
                                      locales, order == 'score')

    # Post read/unread management
    is_unread = request.GET.get('is_unread')
    translations = None
    if user_id != Everyone:
        # This is horrible, but the join creates complex subqueries that
        # virtuoso cannot decode properly.
        read_posts = {
            v.post_id
            for v in discussion.db.query(ViewPost).filter(
                ViewPost.tombstone_condition(), ViewPost.actor_id == user_id,
                *ViewPost.get_discussion_conditions(discussion.id))
        }
        liked_posts = {
            l.post_id: l.id
            for l in discussion.db.query(LikedPost).filter(
                LikedPost.tombstone_condition(), LikedPost.actor_id == user_id,
                *LikedPost.get_discussion_conditions(discussion.id))
        }
        if is_unread is not None:
            posts = posts.outerjoin(
                ViewPost,
                and_(ViewPost.actor_id == user_id,
                     ViewPost.post_id == PostClass.id,
                     ViewPost.tombstone_date == None))
            if is_unread == "true":
                posts = posts.filter(ViewPost.id == None)
            elif is_unread == "false":
                posts = posts.filter(ViewPost.id != None)
        user = AgentProfile.get(user_id)
        service = discussion.translation_service()
        if service.canTranslate is not None:
            translations = PrefCollectionTranslationTable(
                service, LanguagePreferenceCollection.getCurrent(request))
    else:
        #If there is no user_id, all posts are always unread
        if is_unread == "false":
            raise HTTPBadRequest(
                localizer.translate(
                    _("You must be logged in to view which posts are read")))

    # posts = posts.options(contains_eager(Post.source))
    # Horrible hack... But useful for structure load
    if view_def in ('partial_post', 'id_only'):
        pass  # posts = posts.options(defer(Post.body))
    else:
        ideaContentLinkQuery = posts.with_entities(
            PostClass.id, PostClass.idea_content_links_above_post)
        ideaContentLinkCache = dict(ideaContentLinkQuery.all())
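        # Prefetched so serialization below can attach idea-content links to
        # each post without issuing an extra query per post.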
        posts = posts.options(
            # undefer(Post.idea_content_links_above_post),
            joinedload_all(Post.creator),
            joinedload_all(Post.extracts),
            joinedload_all(Post.widget_idea_links),
            joinedload_all(SynthesisPost.publishes_synthesis),
            subqueryload_all(Post.attachments))
        if len(discussion.discussion_locales) > 1:
            posts = posts.options(*Content.subqueryload_options())
        else:
            posts = posts.options(*Content.joinedload_options())

    if order == 'chronological':
        posts = posts.order_by(Content.creation_date)
    elif order == 'reverse_chronological':
        posts = posts.order_by(Content.creation_date.desc())
    elif order == 'score':
        posts = posts.order_by(rank.desc())
    elif order == 'popularity':
        # assume reverse chronological otherwise
        posts = posts.order_by(Content.like_count.desc(),
                               Content.creation_date.desc())
    else:
        posts = posts.order_by(Content.id)
    # print str(posts)

    no_of_posts = 0
    no_of_posts_viewed_by_user = 0

    if deleted is True:
        # We just got deleted posts, now we want their ancestors for context
        post_ids = set()
        ancestor_ids = set()

        def add_ancestors(post):
            post_ids.add(post.id)
            ancestor_ids.update(
                [int(x) for x in post.ancestry.strip(",").split(",") if x])

        posts = list(posts)
        for post in posts:
            add_ancestors(post)
        ancestor_ids -= post_ids
        if ancestor_ids:
            ancestors = discussion.db.query(PostClass).filter(
                PostClass.id.in_(ancestor_ids))
            if view_def in ('partial_post', 'id_only'):
                pass  # ancestors = ancestors.options(defer(Post.body))
            else:
                ancestors = ancestors.options(
                    # undefer(Post.idea_content_links_above_post),
                    joinedload_all(Post.creator),
                    joinedload_all(Post.extracts),
                    joinedload_all(Post.widget_idea_links),
                    joinedload_all(SynthesisPost.publishes_synthesis),
                    subqueryload_all(Post.attachments))
                if len(discussion.discussion_locales) > 1:
                    ancestors = ancestors.options(
                        *Content.subqueryload_options())
                else:
                    ancestors = ancestors.options(
                        *Content.joinedload_options())
            posts.extend(ancestors.all())

    if view_def == 'id_only':
        posts = posts.with_entities(PostClass.id)

    for query_result in posts:
        score, viewpost, likedpost = None, None, None
        if not isinstance(query_result, (list, tuple)):
            query_result = [query_result]
        post = query_result[0]
        no_of_posts += 1
        if view_def == 'id_only':
            post_data.append(Content.uri_generic(post))
            continue
        if deleted is True:
            add_ancestors(post)

        if user_id != Everyone:
            viewpost = post.id in read_posts
            likedpost = liked_posts.get(post.id, None)
            if view_def not in ("partial_post", "id_only"):
                translate_content(post,
                                  translation_table=translations,
                                  service=service)
        serializable_post = post.generic_json(view_def, user_id,
                                              permissions) or {}
        if order == 'score':
            score = query_result[1]
            serializable_post['score'] = score

        if viewpost:
            serializable_post['read'] = True
            no_of_posts_viewed_by_user += 1
        elif user_id != Everyone and root_post is not None and root_post.id == post.id:
            # Mark post read, we requested it explicitely
            viewed_post = ViewPost(actor_id=user_id, post=root_post)
            discussion.db.add(viewed_post)
            serializable_post['read'] = True
        else:
            serializable_post['read'] = False
        # serializable_post['liked'] = likedpost.uri() if likedpost else False
        serializable_post['liked'] = (LikedPost.uri_generic(likedpost)
                                      if likedpost else False)
        if view_def not in ("partial_post", "id_only"):
            serializable_post['indirect_idea_content_links'] = (
                post.indirect_idea_content_links_with_cache(
                    ideaContentLinkCache.get(post.id, None)))

        post_data.append(serializable_post)

    # Benoitg:  For now, this completely garbles threading without intelligent
    #handling of pagination.  Disabling
    #posts = posts.limit(page_size).offset(data['startIndex']-1)
    # This code isn't up to date.  If limiting the query by page, we need to
    # calculate the counts with a separate query to have the right number of
    # results
    #no_of_messages_viewed_by_user = discussion.db.query(ViewPost).join(
    #    Post
    #).filter(
    #    Post.discussion_id == discussion.id,
    #    ViewPost.actor_id == user_id,
    #).count() if user_id else 0

    data = {}
    data["page"] = page
    data["unread"] = no_of_posts - no_of_posts_viewed_by_user
    data["total"] = no_of_posts
    data["maxPage"] = max(1, ceil(data["total"] / page_size))
    #TODO:  Check if we want 1 based index in the api
    data["startIndex"] = (page_size * page) - (page_size - 1)

    if data["page"] == data["maxPage"]:
        data["endIndex"] = data["total"]
    else:
        data["endIndex"] = data["startIndex"] + (page_size - 1)
    data["posts"] = post_data

    return data
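The ancestry column relied on above (both in the LIKE filters and in
add_ancestors) is a comma-separated, comma-terminated path of ancestor ids.
A standalone sketch of that parsing (sample values invented):

def parse_ancestry(ancestry):
    # "12,40,41," -> [12, 40, 41]; the root's empty path yields no ancestors.
    return [int(x) for x in ancestry.strip(",").split(",") if x]

assert parse_ancestry("12,40,41,") == [12, 40, 41]
assert parse_ancestry("") == []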
Example #16
0
File: post.py Project: assembl/assembl
def get_posts(request):
    """
    Query interface on posts
    Filters have two forms:
    only_*, is for filters that cannot be reversed (ex: only_synthesis, only_orphan)
    is_*, is for filters that can be reversed (ex:is_unread=true returns only unread message, is_unread=false returns only read messages)
    order: can be chronological, reverse_chronological, popularity
    root_post_id: all posts below the one specified.
    family_post_id: all posts below the one specified, and all its ancestors.
    post_replies_to: replies to a given post
    root_idea_id: all posts associated with the given idea
    ids: explicit message ids.
    posted_after_date, posted_before_date: date selection (ISO format)
    post_author: filter by author
    classifier: filter on message_classifier, or absence thereof (classifier=null). Can be negated with "!"
    """
    localizer = request.localizer
    discussion_id = int(request.matchdict['discussion_id'])
    discussion = Discussion.get(int(discussion_id))
    if not discussion:
        raise HTTPNotFound(localizer.translate(
            _("No discussion found with id=%s")) % discussion_id)

    discussion.import_from_sources()

    user_id = request.authenticated_userid or Everyone
    permissions = get_permissions(user_id, discussion_id)

    DEFAULT_PAGE_SIZE = 25
    page_size = DEFAULT_PAGE_SIZE

    filter_names = [
        filter_name for filter_name
        in request.GET.getone('filters').split(',')
        if filter_name
    ] if request.GET.get('filters') else []

    try:
        page = int(request.GET.getone('page'))
    except (ValueError, KeyError):
        page = 1

    text_search = request.GET.get('text_search', None)

    order = request.GET.get('order')
    if order is None:
        order = 'chronological'
    assert order in ('chronological', 'reverse_chronological', 'score', 'popularity')
    if order == 'score':
        assert text_search is not None

    if page < 1:
        page = 1

    root_post_id = request.GET.getall('root_post_id')
    if root_post_id:
        root_post_id = get_database_id("Post", root_post_id[0])
    family_post_id = request.GET.getall('family_post_id')
    if family_post_id:
        family_post_id = get_database_id("Post", family_post_id[0])

    root_idea_id = request.GET.getall('root_idea_id')
    if root_idea_id:
        root_idea_id = get_database_id("Idea", root_idea_id[0])

    ids = request.GET.getall('ids[]')
    if ids:
        ids = [get_database_id("Post", id) for id in ids]

    view_def = request.GET.get('view') or 'default'

    only_synthesis = request.GET.get('only_synthesis')

    post_author_id = request.GET.get('post_author')
    if post_author_id:
        post_author_id = get_database_id("AgentProfile", post_author_id)
        assert AgentProfile.get(post_author_id), "Unable to find agent profile with id " + str(post_author_id)

    post_replies_to = request.GET.get('post_replies_to')
    if post_replies_to:
        post_replies_to = get_database_id("AgentProfile", post_replies_to)
        assert AgentProfile.get(post_replies_to), "Unable to find agent profile with id " + str(post_replies_to)

    posted_after_date = request.GET.get('posted_after_date')
    posted_before_date = request.GET.get('posted_before_date')
    message_classifiers = request.GET.getall('classifier')

    PostClass = SynthesisPost if only_synthesis == "true" else Post
    if order == 'score':
        posts = discussion.db.query(PostClass, Content.body_text_index.score_name)
    else:
        posts = discussion.db.query(PostClass)

    posts = posts.filter(
        PostClass.discussion_id == discussion_id,
    ).filter(PostClass.type != 'proposition_post')
    ##no_of_posts_to_discussion = posts.count()

    post_data = []

    # True means deleted only, False (default) means non-deleted only. None means both.

    # v0
    # deleted = request.GET.get('deleted', None)
    # end v0

    # v1: we would like something like that
    # deleted = request.GET.get('deleted', None)
    # if deleted is None:
    #     if view_def == 'id_only':
    #         deleted = None
    #     else:
    #         deleted = False
    # end v1

    # v2
    # deleted = request.GET.get('deleted', None)
    # if deleted is None:
    #     if not ids:
    #         deleted = False
    #     else:
    #         deleted = None
    #
    # if deleted == 'false':
    #     deleted = False
    #     posts = posts.filter(PostClass.tombstone_condition())
    # elif deleted == 'true':
    #     deleted = True
    #     posts = posts.filter(PostClass.not_tombstone_condition())
    # elif deleted == 'any':
    #     deleted = None
    #     # result will contain deleted and non-deleted posts
    #     pass
    # end v2


    # v3
    # deleted = request.GET.get('deleted', None)
    # if deleted is None:
    #     if not ids:
    #         deleted = False
    #     else:
    #         deleted = None

    # if deleted == 'true':
    #     deleted = True
    #     posts = posts.filter(PostClass.not_tombstone_condition())
    # end v3

    # v4
    deleted = request.GET.get('deleted', None)
    if deleted is None:
        if not ids:
            deleted = False
        else:
            deleted = None
    elif deleted.lower() == "any":
        deleted = None
    else:
        deleted = asbool(deleted)
    # if deleted is not in (False, True, None):
    #    deleted = False
    # end v4

    only_orphan = asbool(request.GET.get('only_orphan', False))
    if only_orphan:
        if root_idea_id:
            raise HTTPBadRequest(localizer.translate(
                _("Getting orphan posts of a specific idea isn't supported.")))
        orphans = Idea._get_orphan_posts_statement(
            discussion_id, True, include_deleted=deleted).subquery("orphans")
        posts = posts.join(orphans, PostClass.id == orphans.c.post_id)

    if root_idea_id:
        related = Idea.get_related_posts_query_c(
            discussion_id, root_idea_id, True, include_deleted=deleted)
        posts = posts.join(related, PostClass.id == related.c.post_id)
    elif not only_orphan:
        if deleted is not None:
            if deleted:
                posts = posts.filter(
                    PostClass.publication_state.in_(
                        deleted_publication_states))
            else:
                posts = posts.filter(
                    PostClass.tombstone_date == None)

    if root_post_id:
        root_post = Post.get(root_post_id)

        posts = posts.filter(
            (Post.ancestry.like(root_post.ancestry +
                                cast(root_post.id, String) + ',%'))
            | (PostClass.id == root_post.id))
    elif family_post_id:
        root_post = Post.get(family_post_id)
        ancestor_ids = root_post.ancestor_ids()
        posts = posts.filter(
            (Post.ancestry.like(root_post.ancestry +
                                cast(root_post.id, String) + ',%'))
            | (PostClass.id == root_post.id)
            | (PostClass.id.in_(ancestor_ids)))
    else:
        root_post = None

    if ids:
        posts = posts.filter(Post.id.in_(ids))

    if posted_after_date:
        posted_after_date = parse_datetime(posted_after_date)
        if posted_after_date:
            posts = posts.filter(PostClass.creation_date >= posted_after_date)
        #Maybe we should do something if the date is invalid.  benoitg

    if posted_before_date:
        posted_before_date = parse_datetime(posted_before_date)
        if posted_before_date:
            posts = posts.filter(PostClass.creation_date <= posted_before_date)
        #Maybe we should do something if the date is invalid.  benoitg

    if post_author_id:
        posts = posts.filter(PostClass.creator_id == post_author_id)

    if message_classifiers:
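        # Classifier filters are all-positive or all-negative: a leading "!"
        # negates, and "null" stands for posts with no classifier at all.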
        if any([len(classifier) == 0 for classifier in message_classifiers]):
            return {'total': 0, 'posts': []}
        polarities = [classifier[0] != "!" for classifier in message_classifiers]
        polarity = all(polarities)
        if not polarity:
            message_classifiers = [c.strip("!") for c in message_classifiers]
        if polarity != any(polarities):
            raise HTTPBadRequest(_("Do not combine negative and positive classifiers"))
        # Treat null as no classifier
        includes_null = 'null' in message_classifiers
        if includes_null:
            message_classifiers_nonull = [
                c for c in message_classifiers if c != "null"]
        if polarity:
            if len(message_classifiers) == 1:
                term = PostClass.message_classifier == (None if includes_null else message_classifiers[0])
            else:
                term = PostClass.message_classifier.in_(message_classifiers_nonull)
                if includes_null:
                    term = term | (PostClass.message_classifier == None)
        else:
            if len(message_classifiers) == 1:
                term = PostClass.message_classifier != (None if includes_null else message_classifiers[0])
            else:
                term = PostClass.message_classifier.notin_(message_classifiers_nonull)
            if not includes_null:
                term = term | (PostClass.message_classifier == None)
        posts = posts.filter(term)

    if post_replies_to:
        parent_alias = aliased(PostClass)
        posts = posts.join(parent_alias, PostClass.parent)
        posts = posts.filter(parent_alias.creator_id == post_replies_to)
    # Post read/unread management
    is_unread = request.GET.get('is_unread')
    translations = None
    if user_id != Everyone:
        # This is horrible, but the join creates complex subqueries that
        # virtuoso cannot decode properly.
        read_posts = {v.post_id for v in discussion.db.query(
            ViewPost).filter(
                ViewPost.tombstone_condition(),
                ViewPost.actor_id == user_id,
                *ViewPost.get_discussion_conditions(discussion_id))}
        my_sentiments = {l.post_id: l for l in discussion.db.query(
            SentimentOfPost).filter(
                SentimentOfPost.tombstone_condition(),
                SentimentOfPost.actor_id == user_id,
                *SentimentOfPost.get_discussion_conditions(discussion_id))}
        if is_unread != None:
            posts = posts.outerjoin(
                ViewPost, and_(
                    ViewPost.actor_id == user_id,
                    ViewPost.post_id == PostClass.id,
                    ViewPost.tombstone_date == None))
            if is_unread == "true":
                posts = posts.filter(ViewPost.id == None)
            elif is_unread == "false":
                posts = posts.filter(ViewPost.id != None)
        user = AgentProfile.get(user_id)
        service = discussion.translation_service()
        if service.canTranslate is not None:
            translations = PrefCollectionTranslationTable(
                service, LanguagePreferenceCollection.getCurrent(request))
    else:
        #If there is no user_id, all posts are always unread
        my_sentiments = {}
        if is_unread == "false":
            raise HTTPBadRequest(localizer.translate(
                _("You must be logged in to view which posts are read")))

    if text_search is not None:
        # another Virtuoso bug: offband kills score. but it helps speed.
        offband = () if (order == 'score') else None
        posts = posts.filter(Post.body_text_index.contains(
            text_search.encode('utf-8'), offband=offband))

    # posts = posts.options(contains_eager(Post.source))
    # Horrible hack... But useful for structure load
    if view_def == 'id_only':
        pass  # posts = posts.options(defer(Post.body))
    else:
        ideaContentLinkQuery = posts.with_entities(
            PostClass.id, PostClass.idea_content_links_above_post)
        ideaContentLinkCache = dict(ideaContentLinkQuery.all())
        # Note: we could count the like the same way and kill the subquery.
        # But it interferes with the popularity order,
        # and the benefit is not that high.
        sentiment_counts = discussion.db.query(
                PostClass.id, SentimentOfPost.type, count(SentimentOfPost.id)
            ).join(SentimentOfPost
            ).filter(PostClass.id.in_(posts.with_entities(PostClass.id).subquery()),
                     SentimentOfPost.tombstone_condition()
            ).group_by(PostClass.id, SentimentOfPost.type)
        sentiment_counts_by_post_id = defaultdict(dict)
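        # Pivot the per-(post, sentiment type) rows into a nested dict,
        # stripping the common type prefix to keep the bare sentiment name.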
        for (post_id, sentiment_type, sentiment_count) in sentiment_counts:
            sentiment_counts_by_post_id[post_id][
                sentiment_type[SentimentOfPost.TYPE_PREFIX_LEN:]
            ] = sentiment_count
        posts = posts.options(
            # undefer(Post.idea_content_links_above_post),
            joinedload_all(Post.creator),
            joinedload_all(Post.extracts),
            joinedload_all(Post.widget_idea_links),
            joinedload_all(SynthesisPost.publishes_synthesis),
            subqueryload_all(Post.attachments))
        if len(discussion.discussion_locales) > 1:
            posts = posts.options(*Content.subqueryload_options())
        else:
            posts = posts.options(*Content.joinedload_options())

    if order == 'chronological':
        posts = posts.order_by(Content.creation_date)
    elif order == 'reverse_chronological':
        posts = posts.order_by(Content.creation_date.desc())
    elif order == 'score':
        posts = posts.order_by(Content.body_text_index.score_name.desc())
    elif order == 'popularity':
        # assume reverse chronological otherwise
        posts = posts.order_by(Content.disagree_count - Content.like_count, Content.creation_date.desc())
    else:
        posts = posts.order_by(Content.id)
    # print str(posts)

    no_of_posts = 0
    no_of_posts_viewed_by_user = 0

    if deleted is True:
        # We just got deleted posts, now we want their ancestors for context
        post_ids = set()
        ancestor_ids = set()

        def add_ancestors(post):
            post_ids.add(post.id)
            ancestor_ids.update(
                [int(x) for x in post.ancestry.strip(",").split(",") if x])
        posts = list(posts)
        for post in posts:
            add_ancestors(post)
        ancestor_ids -= post_ids
        if ancestor_ids:
            ancestors = discussion.db.query(
                PostClass).filter(PostClass.id.in_(ancestor_ids))
            if view_def == 'id_only':
                pass  # ancestors = ancestors.options(defer(Post.body))
            else:
                ancestors = ancestors.options(
                    # undefer(Post.idea_content_links_above_post),
                    joinedload_all(Post.creator),
                    joinedload_all(Post.extracts),
                    joinedload_all(Post.widget_idea_links),
                    joinedload_all(SynthesisPost.publishes_synthesis),
                    subqueryload_all(Post.attachments))
                if len(discussion.discussion_locales) > 1:
                    ancestors = ancestors.options(
                        *Content.subqueryload_options())
                else:
                    ancestors = ancestors.options(
                        *Content.joinedload_options())
            posts.extend(ancestors.all())

    for query_result in posts:
        score, viewpost = None, None
        if not isinstance(query_result, (list, tuple)):
            query_result = [query_result]
        post = query_result[0]
        if deleted is True:
            add_ancestors(post)

        if user_id != Everyone:
            viewpost = post.id in read_posts
            if view_def != "id_only":
                translate_content(
                    post, translation_table=translations, service=service)
        no_of_posts += 1
        serializable_post = post.generic_json(
            view_def, user_id, permissions) or {}
        if order == 'score':
            score = query_result[1]
            serializable_post['score'] = score

        if viewpost:
            serializable_post['read'] = True
            no_of_posts_viewed_by_user += 1
        elif user_id != Everyone and root_post is not None and root_post.id == post.id:
            # Mark post read, we requested it explicitely
            viewed_post = ViewPost(
                actor_id=user_id,
                post=root_post
                )
            discussion.db.add(viewed_post)
            serializable_post['read'] = True
        else:
            serializable_post['read'] = False
        my_sentiment = my_sentiments.get(post.id, None)
        if my_sentiment is not None:
            my_sentiment = my_sentiment.generic_json('default', user_id, permissions)
        serializable_post['my_sentiment'] = my_sentiment
        if view_def != "id_only":
            serializable_post['indirect_idea_content_links'] = (
                post.indirect_idea_content_links_with_cache(
                    ideaContentLinkCache.get(post.id, None)))
            serializable_post['sentiment_counts'] = sentiment_counts_by_post_id[post.id]

        post_data.append(serializable_post)

    # Benoitg:  For now, this completely garbles threading without intelligent
    #handling of pagination.  Disabling
    #posts = posts.limit(page_size).offset(data['startIndex']-1)
    # This code isn't up to date.  If limiting the query by page, we need to
    # calculate the counts with a separate query to have the right number of
    # results
    #no_of_messages_viewed_by_user = discussion.db.query(ViewPost).join(
    #    Post
    #).filter(
    #    Post.discussion_id == discussion_id,
    #    ViewPost.actor_id == user_id,
    #).count() if user_id else 0

    data = {}
    data["page"] = page
    data["unread"] = no_of_posts - no_of_posts_viewed_by_user
    data["total"] = no_of_posts
    data["maxPage"] = max(1, ceil(float(data["total"])/page_size))
    #TODO:  Check if we want 1 based index in the api
    data["startIndex"] = (page_size * page) - (page_size-1)

    if data["page"] == data["maxPage"]:
        data["endIndex"] = data["total"]
    else:
        data["endIndex"] = data["startIndex"] + (page_size-1)
    data["posts"] = post_data

    return data
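The message_classifier filter above builds a SQL term; read as plain Python,
its rules amount to the predicate below (an illustrative restatement, not the
SQLAlchemy code):

def classifier_predicate(message_classifiers):
    # "null" selects posts with no classifier at all; a leading "!" negates,
    # and mixing negated with plain classifiers is rejected upstream.
    polarity = all(c[0] != "!" for c in message_classifiers)
    names = {c.strip("!") for c in message_classifiers}
    includes_null = "null" in names
    names.discard("null")

    def matches(classifier):  # `classifier` is a string or None
        hit = (classifier is None and includes_null) or classifier in names
        return hit if polarity else not hit
    return matches

match = classifier_predicate(["alpha", "null"])
assert match("alpha") and match(None) and not match("beta")
match = classifier_predicate(["!alpha"])
assert not match("alpha") and match(None) and match("beta")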
Example #17
0
def get_contribution_count(request):
    import isodate
    from datetime import datetime
    start = request.GET.get("start", None)
    end = request.GET.get("end", None)
    interval = request.GET.get("interval", None)
    discussion = request.context._instance
    try:
        if start:
            start = parse_datetime(start)
        if end:
            end = parse_datetime(end)
        if interval:
            interval = isodate.parse_duration(interval)
    except isodate.ISO8601Error as e:
        raise HTTPBadRequest(e)
    if interval and not start:
        raise HTTPBadRequest("You cannot define an interval and no start")
    if interval and not end:
        end = datetime.now()
    results = []
    if interval:
        while start < end:
            this_end = min(start+interval, end)
            results.append(dict(
                start=start.isoformat(), end=this_end.isoformat(),
                count=discussion.count_contributions_per_agent(
                    start, this_end)))
            start = this_end
    else:
        r = dict(count=discussion.count_contributions_per_agent(start, end))
        if not start:
            from assembl.models import Post
            from sqlalchemy import func
            (start,) = discussion.db.query(
                func.min(Post.creation_date)).filter_by(
                discussion_id=discussion.id).first()
        r["start"] = start.isoformat()
        if not end:
            end = datetime.now()
        r["end"] = end.isoformat()
        results.append(r)
    if not (request.GET.get('format', None) == 'csv'
            or request.accept == 'text/csv'):
        # json default
        for v in results:
            v['count'] = {agent.display_name(): count
                          for (agent, count) in v['count']}
        return Response(json.dumps(results), content_type='application/json')
    # otherwise assume csv
    from csv import writer
    total_count = defaultdict(int)
    agents = {}
    for v in results:
        as_dict = {}
        for (agent, count) in v['count']:
            total_count[agent.id] += count
            as_dict[agent.id] = count
            agents[agent.id] = agent
        v['count'] = as_dict
    count_list = sorted(total_count.items(), key=lambda item: item[1],
                        reverse=True)
    output = StringIO()
    csv = writer(output, dialect='excel', delimiter=';')
    csv.writerow(['Start']+[
        x['start'] for x in results] + ['Total'])
    csv.writerow(['End']+[
        x['end'] for x in results] + [''])
    for agent_id, agent_total in count_list:
        agent = agents[agent_id]
        agent_name = (
            agent.display_name() or agent.real_name() or
            agent.get_preferred_email())
        csv.writerow([agent_name.encode('utf-8')] + [
            x['count'].get(agent_id, '') for x in results] + [agent_total])
    output.seek(0)
    return Response(body_file=output, content_type='text/csv')
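The interval parameter accepted above is an ISO 8601 duration parsed with
isodate; a standalone sketch of the same slicing loop (requires the isodate
package):

import isodate
from datetime import datetime

start = datetime(2024, 1, 1)
end = datetime(2024, 1, 22)
interval = isodate.parse_duration("P7D")  # seven-day buckets

slices = []
while start < end:
    this_end = min(start + interval, end)  # clip the last slice to `end`
    slices.append((start, this_end))
    start = this_end

assert len(slices) == 3 and slices[-1][1] == datetime(2024, 1, 22)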
Example #18
0
def get_time_series_analytics(request):
    import isodate
    from datetime import datetime
    start = request.GET.get("start", None)
    end = request.GET.get("end", None)
    interval = request.GET.get("interval", None)
    discussion = request.context._instance
    user_id = authenticated_userid(request) or Everyone
    try:
        if start:
            start = parse_datetime(start)
        if end:
            end = parse_datetime(end)
        if interval:
            interval = isodate.parse_duration(interval)
    except isodate.ISO8601Error as e:
        raise HTTPBadRequest(e)
    if interval and not start:
        raise HTTPBadRequest("You cannot define an interval and no start")
    if interval and not end:
        end = datetime.now()
    results = []

    from sqlalchemy import (Table, MetaData, and_, case, cast, Float,
                            Column, Integer, DateTime)
    from sqlalchemy.exc import ProgrammingError
    import pprint
    import transaction
    with transaction.manager:
        metadata = MetaData(discussion.db.get_bind())  # make sure we are using the same connection

        intervals_table = Table('temp_table_intervals_' + str(user_id), metadata,
            Column('interval_id', Integer, primary_key=True),
            Column('interval_start', DateTime, nullable=False),
            Column('interval_end', DateTime, nullable=False),
            prefixes=None if discussion.using_virtuoso else ['TEMPORARY']
        )
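        # On Virtuoso the table is created as a plain table (no TEMPORARY
        # prefix) and dropped once the query is done; other backends get a
        # true temporary table.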
        try:
            intervals_table.drop()  # In case there is a leftover from a previous crash
        except ProgrammingError:
            pass
        intervals_table.create()
        interval_start = start
        intervals = []
        if interval:
            while interval_start < end:
                interval_end = min(interval_start + interval, end)
                intervals.append({'interval_start': interval_start, 'interval_end': interval_end})
                interval_start = interval_start + interval
            #pprint.pprint(intervals)
            discussion.db.execute(intervals_table.insert(), intervals)
        else:
            raise HTTPBadRequest("Please specify an interval")

        from assembl.models import Post, AgentProfile, AgentStatusInDiscussion, ViewPost

        # The posters
        post_subquery = discussion.db.query(intervals_table.c.interval_id,
            func.count(distinct(Post.id)).label('count_posts'),
            func.count(distinct(Post.creator_id)).label('count_post_authors'),
            # func.DB.DBA.BAG_AGG(Post.creator_id).label('post_authors'),
            # func.DB.DBA.BAG_AGG(Post.id).label('post_ids'),
            )
        post_subquery = post_subquery.outerjoin(Post, and_(Post.creation_date >= intervals_table.c.interval_start, Post.creation_date < intervals_table.c.interval_end, Post.discussion_id == discussion.id))
        post_subquery = post_subquery.group_by(intervals_table.c.interval_id)
        post_subquery = post_subquery.subquery()

        # The cumulative posters
        cumulative_posts_aliased = aliased(Post)
        cumulative_posts_subquery = discussion.db.query(intervals_table.c.interval_id,
            func.count(distinct(cumulative_posts_aliased.id)).label('count_cumulative_posts'),
            func.count(distinct(cumulative_posts_aliased.creator_id)).label('count_cumulative_post_authors')
            # func.DB.DBA.BAG_AGG(cumulative_posts_aliased.id).label('cumulative_post_ids')
            )
        cumulative_posts_subquery = cumulative_posts_subquery.outerjoin(cumulative_posts_aliased, and_(cumulative_posts_aliased.creation_date < intervals_table.c.interval_end, cumulative_posts_aliased.discussion_id == discussion.id))
        cumulative_posts_subquery = cumulative_posts_subquery.group_by(intervals_table.c.interval_id)
        cumulative_posts_subquery = cumulative_posts_subquery.subquery()

        # The post viewers
        postViewers = aliased(ViewPost)
        viewedPosts = aliased(Post)
        post_viewers_subquery = discussion.db.query(intervals_table.c.interval_id,
            func.count(distinct(postViewers.actor_id)).label('UNRELIABLE_count_post_viewers')
            )
        post_viewers_subquery = post_viewers_subquery.outerjoin(postViewers, and_(postViewers.creation_date >= intervals_table.c.interval_start, postViewers.creation_date < intervals_table.c.interval_end)).\
                        join(viewedPosts, and_(postViewers.post_id == viewedPosts.id, viewedPosts.discussion_id == discussion.id))
        post_viewers_subquery = post_viewers_subquery.group_by(intervals_table.c.interval_id)
        post_viewers_subquery = post_viewers_subquery.subquery()

        # The visitors
        firstTimeVisitorAgent = aliased(AgentStatusInDiscussion)
        visitors_subquery = discussion.db.query(intervals_table.c.interval_id,
            func.count(firstTimeVisitorAgent.id).label('count_first_time_logged_in_visitors'),
            # func.DB.DBA.BAG_AGG(firstTimeVisitorAgent.id).label('first_time_visitors')
            )
        visitors_subquery = visitors_subquery.outerjoin(firstTimeVisitorAgent, and_(firstTimeVisitorAgent.first_visit >= intervals_table.c.interval_start, firstTimeVisitorAgent.first_visit < intervals_table.c.interval_end, firstTimeVisitorAgent.discussion_id == discussion.id))
        visitors_subquery = visitors_subquery.group_by(intervals_table.c.interval_id)
        visitors_subquery = visitors_subquery.subquery()

        # The cumulative visitors
        cumulativeVisitorAgent = aliased(AgentStatusInDiscussion)
        cumulative_visitors_query = discussion.db.query(intervals_table.c.interval_id,
            func.count(distinct(cumulativeVisitorAgent.id)).label('count_cumulative_logged_in_visitors'),
            # func.DB.DBA.BAG_AGG(cumulativeVisitorAgent.id).label('first_time_visitors')
            )
        cumulative_visitors_query = cumulative_visitors_query.outerjoin(cumulativeVisitorAgent, and_(cumulativeVisitorAgent.first_visit < intervals_table.c.interval_end, cumulativeVisitorAgent.discussion_id == discussion.id))
        cumulative_visitors_query = cumulative_visitors_query.group_by(intervals_table.c.interval_id)
        cumulative_visitors_subquery = cumulative_visitors_query.subquery()
        # query = cumulative_visitors_query

        # The members (membership can go up and down over time); disabled
        # below because it assumes first_subscribed is available.
        commented_out = """ first_subscribed isn't yet filled in by assembl
        memberAgentStatus = aliased(AgentStatusInDiscussion)
        members_subquery = discussion.db.query(intervals_table.c.interval_id,
            func.count(memberAgentStatus.id).label('count_approximate_members')
            )
        members_subquery = members_subquery.outerjoin(memberAgentStatus, and_(
            (memberAgentStatus.last_unsubscribed >= intervals_table.c.interval_end)
            | (memberAgentStatus.last_unsubscribed.is_(None)),
            (memberAgentStatus.first_subscribed < intervals_table.c.interval_end)
            | (memberAgentStatus.first_subscribed.is_(None)),
            memberAgentStatus.discussion_id == discussion.id))
        members_subquery = members_subquery.group_by(intervals_table.c.interval_id)
        query = members_subquery
        members_subquery = members_subquery.subquery()
        """

        subscribersAgentStatus = aliased(AgentStatusInDiscussion)
        subscribers_query = discussion.db.query(
            intervals_table.c.interval_id,
            func.sum(case([
                (subscribersAgentStatus.last_visit.is_(None), 0),
                (and_(subscribersAgentStatus.last_visit < intervals_table.c.interval_end,
                      subscribersAgentStatus.last_visit >= intervals_table.c.interval_start), 1)
                ], else_=0)).label('retention_count_last_visit_in_period'),
            func.sum(case([
                (subscribersAgentStatus.first_visit.is_(None), 0),
                (and_(subscribersAgentStatus.first_visit < intervals_table.c.interval_end,
                      subscribersAgentStatus.first_visit >= intervals_table.c.interval_start), 1)
                ], else_=0)).label('recruitment_count_first_visit_in_period'),
            func.sum(case([
                (subscribersAgentStatus.first_subscribed.is_(None), 0),
                (and_(subscribersAgentStatus.first_subscribed < intervals_table.c.interval_end,
                      subscribersAgentStatus.first_subscribed >= intervals_table.c.interval_start), 1)
                ], else_=0)).label('UNRELIABLE_recruitment_count_first_subscribed_in_period'),
            # counts unsubscriptions (last_unsubscribed) falling in the period
            func.sum(case([
                (subscribersAgentStatus.last_unsubscribed.is_(None), 0),
                (and_(subscribersAgentStatus.last_unsubscribed < intervals_table.c.interval_end,
                      subscribersAgentStatus.last_unsubscribed >= intervals_table.c.interval_start), 1)
                ], else_=0)).label('UNRELIABLE_retention_count_last_unsubscribed_in_period'),
            )
        subscribers_query = subscribers_query.outerjoin(
            subscribersAgentStatus, subscribersAgentStatus.discussion_id == discussion.id)
        subscribers_query = subscribers_query.group_by(intervals_table.c.interval_id)
        subscribers_subquery = subscribers_query.subquery()
        # query = subscribers_query
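        # Note: the case([...]) list form used throughout this module is the
        # SQLAlchemy <1.4 syntax. On SQLAlchemy 1.4+ the equivalent uses
        # positional whens, e.g.
        #   case((subscribersAgentStatus.last_visit.is_(None), 0),
        #        (and_(...), 1),
        #        else_=0)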

        combined_query = discussion.db.query(
            intervals_table,
            post_subquery,
            cumulative_posts_subquery,
            post_viewers_subquery,
            visitors_subquery,
            cumulative_visitors_subquery,
            case([
                (cumulative_posts_subquery.c.count_cumulative_post_authors == 0,
                 None),
                (cumulative_posts_subquery.c.count_cumulative_post_authors != 0,
                 cast(post_subquery.c.count_post_authors, Float)
                 / cast(cumulative_posts_subquery.c.count_cumulative_post_authors, Float)),
                ]).label('fraction_cumulative_authors_who_posted_in_period'),
            case([
                (cumulative_visitors_subquery.c.count_cumulative_logged_in_visitors == 0,
                 None),
                (cumulative_visitors_subquery.c.count_cumulative_logged_in_visitors != 0,
                 cast(post_subquery.c.count_post_authors, Float)
                 / cast(cumulative_visitors_subquery.c.count_cumulative_logged_in_visitors, Float)),
                ]).label('fraction_cumulative_logged_in_visitors_who_posted_in_period'),
            subscribers_subquery,
            )
        combined_query = combined_query.join(
            post_subquery,
            post_subquery.c.interval_id == intervals_table.c.interval_id)
        combined_query = combined_query.join(
            post_viewers_subquery,
            post_viewers_subquery.c.interval_id == intervals_table.c.interval_id)
        combined_query = combined_query.join(
            visitors_subquery,
            visitors_subquery.c.interval_id == intervals_table.c.interval_id)
        combined_query = combined_query.join(
            cumulative_visitors_subquery,
            cumulative_visitors_subquery.c.interval_id == intervals_table.c.interval_id)
        # combined_query = combined_query.join(
        #     members_subquery,
        #     members_subquery.c.interval_id == intervals_table.c.interval_id)
        combined_query = combined_query.join(
            subscribers_subquery,
            subscribers_subquery.c.interval_id == intervals_table.c.interval_id)
        combined_query = combined_query.join(
            cumulative_posts_subquery,
            cumulative_posts_subquery.c.interval_id == intervals_table.c.interval_id)
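        # All subqueries are keyed by interval_id, so each join above is
        # one-to-one per interval. Inner joins are only safe as long as every
        # subquery produces a row for every interval (see the post-viewers
        # caveat above).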

        query = combined_query
        query = query.order_by(intervals_table.c.interval_id)
        results = query.all()
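        # .all() returns one keyed-tuple row per interval; the CSV branch
        # below relies on row._asdict() to map column labels to values.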

        # pprint.pprint(results)
        # end of transaction

    intervals_table.drop()
    if not (request.GET.get('format', None) == 'csv' or
            request.accept == 'text/csv'):
        # json default
        from assembl.lib.json import DateJSONEncoder
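        # A minimal sketch of what an encoder like DateJSONEncoder typically
        # does (an assumption for illustration, not necessarily assembl's
        # actual class):
        #   class DateJSONEncoder(json.JSONEncoder):
        #       def default(self, obj):
        #           if isinstance(obj, (datetime, date)):
        #               return obj.isoformat()
        #           return json.JSONEncoder.default(self, obj)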
        return Response(json.dumps(results, cls=DateJSONEncoder),
                        content_type='application/json')

    # otherwise assume csv
    fieldnames = [
        "interval_id",
        "interval_start",
        "interval_end",
        "count_first_time_logged_in_visitors",
        "count_cumulative_logged_in_visitors",
        "fraction_cumulative_logged_in_visitors_who_posted_in_period",
        "count_post_authors",
        "count_cumulative_post_authors",
        "fraction_cumulative_authors_who_posted_in_period",
        "count_posts",
        "count_cumulative_posts",
        "recruitment_count_first_visit_in_period",
        "UNRELIABLE_recruitment_count_first_subscribed_in_period",
        "retention_count_last_visit_in_period",
        "UNRELIABLE_retention_count_first_subscribed_in_period",
        "UNRELIABLE_count_post_viewers",
    ]
    return csv_response(fieldnames, [r._asdict() for r in results])
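

# csv_response is a helper defined elsewhere in this module. Below is a
# minimal sketch of the behaviour the view above relies on; the name
# csv_response_sketch is hypothetical and this is an assumption for
# illustration, not assembl's actual implementation.
def csv_response_sketch(fieldnames, rows):
    from csv import DictWriter
    from io import StringIO  # Python 3; Python 2 would use StringIO.StringIO
    from pyramid.response import Response

    output = StringIO()
    # extrasaction='ignore' drops row keys absent from fieldnames, since each
    # result row carries more columns than the CSV exposes
    writer = DictWriter(output, fieldnames=fieldnames, extrasaction='ignore')
    writer.writeheader()
    for row in rows:
        writer.writerow(row)  # datetime values serialize via str()
    output.seek(0)
    return Response(body_file=output, content_type='text/csv')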