def find_or_create_agent_profile(db, profile):
    """Find or create a local AgentProfile equivalent to ``profile``.

    Walks the given profile's accounts, resolves each to its local
    equivalent, and unifies any existing local profiles those accounts
    belong to into a single profile (merging duplicates).  Accounts with
    no profile yet are attached to the resulting profile.

    :param db: database session the new objects are added to
    :param profile: an AgentProfile (possibly from another instance)
    :return: the local AgentProfile all accounts now point to
    """
    from assembl.models import (
        AgentProfile, SocialAuthAccount, User)
    assert isinstance(profile, AgentProfile)
    accounts = []
    profiles = set()
    for account in profile.accounts:
        # Social-auth accounts need provider resolution; anything else
        # goes through the generic object lookup.
        if isinstance(account, SocialAuthAccount):
            eq = find_or_create_provider_account(db, account)
        else:
            eq = find_or_create_object(account)
        if eq.profile:
            profiles.add(eq.profile)
        accounts.append(eq)
    if not profiles:
        # No local account had a profile yet: clone the basic columns.
        cols = ['name', 'description']
        # if isinstance(profile, User):
        #     cols += ["preferred_email", "timezone"]
        new_profile = AgentProfile(**{k: getattr(profile, k) for k in cols})
        db.add(new_profile)
    else:
        # Prefer a full User over a bare AgentProfile as the merge target.
        user_profiles = {p for p in profiles if isinstance(p, User)}
        if user_profiles:
            new_profile = user_profiles.pop()
            profiles.remove(new_profile)
        else:
            new_profile = profiles.pop()
        while profiles:
            # NOTE(review): the return value of merge() is discarded here,
            # whereas a sibling variant of this function reassigns it
            # (new_profile = new_profile.merge(...)).  Confirm which
            # contract merge() actually has.
            new_profile.merge(profiles.pop())
    # Attach any orphan accounts to the chosen profile.
    for account in accounts:
        if account.profile is None:
            account.profile = new_profile
            db.add(account)
    return new_profile
def find_or_create_agent_profile(db, profile):
    """Find or create a local AgentProfile equivalent to ``profile``.

    Resolves each of the profile's accounts to its local equivalent,
    merges any distinct local profiles those accounts belong to, and
    attaches orphan accounts to the resulting profile.

    :param db: database session new objects are added to
    :param profile: an AgentProfile (possibly from another instance)
    :return: the local AgentProfile all accounts now point to
    :raises ValueError: if an account is of an unsupported type
    """
    from assembl.models import (
        AgentProfile, IdentityProviderAccount, EmailAccount, User)
    assert isinstance(profile, AgentProfile)
    accounts = []
    profiles = set()
    for account in profile.accounts:
        if isinstance(account, EmailAccount):
            eq = find_or_create_email_account(account)
        elif isinstance(account, IdentityProviderAccount):
            eq = find_or_create_provider_account(account)
        else:
            # BUG FIX: previously there was no else branch, so an account of
            # any other type left ``eq`` unbound (NameError) on the first
            # iteration, or silently reused the previous iteration's ``eq``.
            # Fail loudly instead.
            raise ValueError("Unsupported account type: %r" % (account,))
        if eq.profile:
            profiles.add(eq.profile)
        accounts.append(eq)
    if not profiles:
        # No local account had a profile yet: clone the basic columns.
        cols = ['name', 'description']
        if isinstance(profile, User):
            cols += ["preferred_email", "timezone"]
        new_profile = AgentProfile(**{k: getattr(profile, k) for k in cols})
        db.add(new_profile)
    else:
        new_profile = profiles.pop()
        while profiles:
            # merge() may return the surviving profile; keep following it.
            new_profile = new_profile.merge(profiles.pop())
    # Attach any orphan accounts to the chosen profile.
    for account in accounts:
        if account.profile is None:
            account.profile = new_profile
            db.add(account)
    return new_profile
def find_or_create_agent_profile(db, profile):
    """Return a local AgentProfile equivalent to ``profile``.

    Each of the profile's accounts is resolved to a local equivalent;
    any distinct local profiles found are unified into a single target
    profile, and accounts without a profile are attached to it.
    """
    from assembl.models import (
        AgentProfile, SocialAuthAccount, User)
    assert isinstance(profile, AgentProfile)
    local_accounts = []
    known_profiles = set()
    for account in profile.accounts:
        if isinstance(account, SocialAuthAccount):
            local = find_or_create_provider_account(db, account)
        else:
            local = find_or_create_object(account)
        if local.profile:
            known_profiles.add(local.profile)
        local_accounts.append(local)
    if known_profiles:
        # Prefer a full User as the surviving profile, then fold the
        # remaining profiles into it.
        users = {p for p in known_profiles if isinstance(p, User)}
        if users:
            target = users.pop()
            known_profiles.remove(target)
        else:
            target = known_profiles.pop()
        while known_profiles:
            target.merge(known_profiles.pop())
    else:
        # Nothing local yet: clone the basic columns onto a new profile.
        attrs = ['name', 'description']
        # if isinstance(profile, User):
        #     attrs += ["preferred_email", "timezone"]
        target = AgentProfile(**{a: getattr(profile, a) for a in attrs})
        db.add(target)
    for account in local_accounts:
        if account.profile is None:
            account.profile = target
            db.add(account)
    return target
def view_profile_collection(request):
    """Collection view for agent profiles.

    Delegates to the generic ``collection_view`` and, unless the caller
    asked for the bare ``id_only`` view, enriches each serialized
    profile with its post count in the current discussion.

    :param request: Pyramid request; ``view`` GET param selects the view
    :return: list of serialized profiles (JSON-ready dicts)
    """
    ctx = request.context
    view = request.GET.get('view', None) or ctx.get_default_view() or 'default'
    content = collection_view(request)
    if view != "id_only":
        discussion = ctx.get_instance_of_class(Discussion)
        if discussion:
            # Dropped the unused ``Post`` import; only AgentProfile is needed.
            from assembl.models import AgentProfile
            num_posts_per_user = \
                AgentProfile.count_posts_in_discussion_all_profiles(discussion)
            for item in content:
                # Renamed from ``id`` to avoid shadowing the builtin.
                profile_id = AgentProfile.get_database_id(item['@id'])
                if profile_id in num_posts_per_user:
                    item['post_count'] = num_posts_per_user[profile_id]
    return content
def view_profile_collection(request):
    """Serialize the profile collection for the current context.

    For any view other than ``id_only``, each entry additionally gets a
    ``post_count`` for the current discussion.
    """
    ctx = request.context
    requested_view = (request.GET.get('view', None) or
                      ctx.get_default_view() or 'default')
    content = collection_view(request)
    if requested_view == "id_only":
        return content
    discussion = ctx.get_instance_of_class(Discussion)
    if discussion:
        from assembl.models import Post, AgentProfile
        counts = AgentProfile.count_posts_in_discussion_all_profiles(
            discussion)
        for entry in content:
            pid = AgentProfile.get_database_id(entry['@id'])
            if pid in counts:
                entry['post_count'] = counts[pid]
    return content
def discussion_as_graph(self, discussion_id):
    """Build an RDF ConjunctiveGraph of a discussion's public content.

    Collects every subject linked to the conversation, the discussion
    itself, participant profiles and their pseudo-accounts from the
    Virtuoso store, then adds account-of and expressesIdea
    relationships restricted to this discussion.

    :param discussion_id: database id of the Discussion
    :return: rdflib ConjunctiveGraph identified by the discussion graph IRI
    """
    from assembl.models import Discussion, AgentProfile
    local_uri = self.local_uri()
    discussion = Discussion.get(discussion_id)
    d_storage_name = self.discussion_storage_name()
    d_graph_iri = URIRef(self.discussion_graph_iri())
    v = get_virtuoso(self.session, d_storage_name)
    discussion_uri = URIRef(
        Discussion.uri_generic(discussion_id, local_uri))
    # All subjects explicitly in this conversation.
    subjects = [s for (s,) in v.query(
        """SELECT DISTINCT ?s WHERE { ?s assembl:in_conversation %s }""" % (
            discussion_uri.n3()))]
    subjects.append(discussion_uri)
    participant_ids = list(discussion.get_participants(True))
    profiles = {URIRef(AgentProfile.uri_generic(id, local_uri))
                for id in participant_ids}
    subjects.extend(profiles)
    # add pseudo-accounts
    subjects.extend((URIRef("%sAgentAccount/%d" % (local_uri, id))
                     for id in participant_ids))
    # print len(subjects)
    cg = ConjunctiveGraph(identifier=d_graph_iri)
    self.add_subject_data(v, cg, subjects)
    # add relationships of non-pseudo accounts
    for ((account, p, profile), g) in v.triples(
            (None, SIOC.account_of, None)):
        if profile in profiles:
            cg.add((account, SIOC.account_of, profile, g))
            # Tempting: simplify with this.
            # cg.add((profile, FOAF.account, account, g))
    # Idea-expression links whose target idea is in this conversation.
    for (s, o, g) in v.query(
            '''SELECT ?s ?o ?g WHERE { GRAPH ?g {?s catalyst:expressesIdea ?o } . ?o assembl:in_conversation %s }''' % (
                discussion_uri.n3())):
        cg.add((s, CATALYST.expressesIdea, o, g))
    return cg
def discussion_as_graph(self, discussion_id):
    """Assemble the public RDF graph for one discussion.

    Pulls conversation subjects, the discussion node, participant
    profiles and pseudo-accounts from Virtuoso, then adds the
    account/profile and catalyst:expressesIdea relationships scoped to
    this conversation.

    :param discussion_id: database id of the Discussion
    :return: rdflib ConjunctiveGraph for the discussion graph IRI
    """
    from assembl.models import Discussion, AgentProfile
    local_uri = self.local_uri()
    discussion = Discussion.get(discussion_id)
    d_storage_name = self.discussion_storage_name()
    d_graph_iri = URIRef(self.discussion_graph_iri())
    v = get_virtuoso(self.session, d_storage_name)
    discussion_uri = URIRef(
        Discussion.uri_generic(discussion_id, local_uri))
    subjects = [s for (s,) in v.query(
        """SELECT DISTINCT ?s WHERE { ?s assembl:in_conversation %s }""" % (
            discussion_uri.n3()))]
    subjects.append(discussion_uri)
    participant_ids = list(discussion.get_participants(True))
    profiles = {URIRef(AgentProfile.uri_generic(id, local_uri))
                for id in participant_ids}
    subjects.extend(profiles)
    # add pseudo-accounts
    subjects.extend((URIRef("%sAgentAccount/%d" % (local_uri, id))
                     for id in participant_ids))
    # print len(subjects)
    cg = ConjunctiveGraph(identifier=d_graph_iri)
    self.add_subject_data(v, cg, subjects)
    # add relationships of non-pseudo accounts
    for ((account, p, profile), g) in v.triples(
            (None, SIOC.account_of, None)):
        if profile in profiles:
            cg.add((account, SIOC.account_of, profile, g))
            # Tempting: simplify with this.
            # cg.add((profile, FOAF.account, account, g))
    for (s, o, g) in v.query(
            '''SELECT ?s ?o ?g WHERE { GRAPH ?g {?s catalyst:expressesIdea ?o } . ?o assembl:in_conversation %s }''' % (
                discussion_uri.n3())):
        cg.add((s, CATALYST.expressesIdea, o, g))
    return cg
def password_change_sent(request):
    """Render the password-change confirmation page.

    On the first hit (no ``sent`` param) the change-password email is
    actually sent; in every case the "check your inbox" page context is
    returned.
    """
    localizer = request.localizer
    profile_id = int(request.matchdict.get('profile_id'))
    if not request.params.get('sent', False):
        profile = AgentProfile.get(profile_id)
        if not profile:
            raise HTTPNotFound("No profile "+str(profile_id))
        address = (request.params.get('email') or
                   profile.get_preferred_email())
        send_change_password_email(
            request, profile, address,
            discussion=discussion_from_request(request))
    slug = request.matchdict.get('discussion_slug', None)
    slug_prefix = "/" + slug if slug else ""
    return dict(
        get_default_context(request),
        profile_id=profile_id,
        slug_prefix=slug_prefix,
        action="%s/password_change_sent/%d" % (slug_prefix, profile_id),
        error=request.params.get('error'),
        title=localizer.translate(_('Password change requested')),
        description=localizer.translate(_(
            'We have sent you an email with a temporary connection link. '
            'Please use that link to log in and change your password.')))
def password_change_sent(request):
    """Password-change confirmation page (route-aware variant).

    Sends the change-password email on first access (no ``sent`` param),
    then returns the template context for the confirmation page, with
    the form action resolved through the context's route helper.
    """
    localizer = request.localizer
    profile_id = int(request.matchdict.get('profile_id'))
    if not request.params.get('sent', False):
        profile = AgentProfile.get(profile_id)
        if not profile:
            raise HTTPNotFound("No profile " + str(profile_id))
        address = (request.params.get('email') or
                   profile.get_preferred_email())
        send_change_password_email(
            request, profile, address,
            discussion=discussion_from_request(request))
    context = get_default_context(request)
    return dict(
        context,
        profile_id=profile_id,
        action=context['get_route'](
            "password_change_sent", profile_id=profile_id),
        error=request.params.get('error'),
        title=localizer.translate(_('Password change requested')),
        description=localizer.translate(
            _('We have sent you an email with a temporary connection link. '
              'Please use that link to log in and change your password.')))
def get_instance_of_class(self, cls):
    """Return the current user's profile when ``cls`` is an
    AgentProfile subclass; None for other classes or anonymous users.

    The looked-up profile is memoized on ``self._user_cache``.
    """
    from assembl.models import AgentProfile
    if not issubclass(cls, AgentProfile):
        return
    current_id = self.get_user_id()
    if not current_id or current_id == Everyone:
        return
    if self._user_cache is None:
        self._user_cache = AgentProfile.get(current_id)
    return self._user_cache
def post_agent(request):
    """Update the current user's own profile from form parameters.

    Handles username change (with uniqueness check), display name,
    password change (with confirmation match) and adding an email
    account.  Only the profile owner may post.

    :raises HTTPUnauthorized: when the requester is not the profile owner
    """
    agent_id = request.matchdict['id']
    agent = AgentProfile.get_instance(agent_id)
    current_user = authenticated_userid(request)
    if current_user != agent.id:
        # Only allow post by self.
        raise HTTPUnauthorized()
    # NOTE(review): ``redirect`` is never set to True, so the HTTPFound
    # branch at the bottom is dead code — confirm whether a redirect after
    # username change was intended.
    redirect = False
    username = request.params.get('username', '').strip()
    session = AgentProfile.db
    localizer = request.localizer
    # NOTE(review): ``errors`` is accumulated but never returned or
    # displayed; validation failures are silently dropped.
    errors = []
    if username and (
            agent.username is None or username != agent.username):
        # check if exists
        if session.query(Username).filter_by(username=username).count():
            errors.append(localizer.translate(_(
                'The username %s is already used')) % (username,))
        else:
            # ``agent.username`` appears to be a Username row (it is passed
            # to session.delete) yet is also compared to a str above —
            # presumably Username defines equality with str; verify.
            old_username = agent.username
            if old_username is not None:
                # free existing username
                session.delete(old_username)
                session.flush()
            # add new username
            session.add(Username(username=username, user=agent))
    name = request.params.get('name', '').strip()
    if name:
        agent.name = name
    p1, p2 = (request.params.get('password1', '').strip(),
              request.params.get('password2', '').strip())
    if p1 != p2:
        errors.append(localizer.translate(_(
            'The passwords are not identical')))
    elif p1:
        agent.set_password(p1)
    add_email = request.params.get('add_email', '').strip()
    if add_email:
        if not is_email(add_email):
            return dict(get_default_context(request),
                        error=localizer.translate(_(
                            "This is not a valid email")))
        # No need to check presence since not validated yet
        email = EmailAccount(
            email=add_email,
            profile=agent)
        session.add(email)
    if redirect:
        return HTTPFound(location=request.route_url(
            'profile_user', type='u', identifier=username))
    # NOTE(review): ``profile`` is queried but never used.
    profile = session.query(User).get(agent_id)
    return {}
def put_extract(request):
    """Update an Extract from a JSON request body.

    Accepts annotator-token authentication as a fallback when there is
    no authenticated Pyramid user.  Updates ownership, ordering,
    importance and the associated idea (``idIdea``), validating that the
    idea belongs to the same discussion and that the user may associate
    extracts with it.

    :raises HTTPNotFound: when the extract does not exist
    :raises HTTPForbidden: when edit/associate permission is missing
    :raises HTTPBadRequest: when the idea is from another discussion
    :return: ``{'ok': True}`` on success
    """
    extract_id = request.matchdict['id']
    user_id = authenticated_userid(request)
    discussion = request.context
    if not user_id:
        # Straight from annotator: fall back to the annotator auth token.
        token = request.headers.get('X-Annotator-Auth-Token')
        if token:
            token = decode_token(
                token, request.registry.settings['session.secret'])
            if token:
                user_id = token['userId']
    user_id = user_id or Everyone
    extract = Extract.get_instance(extract_id)
    if not extract:
        raise HTTPNotFound("Extract with id '%s' not found." % extract_id)
    permissions = get_permissions(user_id, discussion.id, extract)
    if P_EDIT_EXTRACT not in permissions:
        raise HTTPForbidden()
    updated_extract_data = json.loads(request.body)
    # NOTE(review): user_id is always truthy here (it was defaulted to
    # Everyone above), so the ``or`` fallback never runs — confirm intent.
    extract.owner_id = user_id or AgentProfile.get_database_id(
        extract.owner_id)
    extract.order = updated_extract_data.get('order', extract.order)
    extract.important = updated_extract_data.get(
        'important', extract.important)
    idea_id = updated_extract_data.get('idIdea', None)
    if idea_id:
        idea = Idea.get_instance(idea_id)
        if (idea.discussion != extract.discussion):
            raise HTTPBadRequest(
                "Extract from discussion %s cannot be associated with an idea from a different discussion." % extract.get_discussion_id())
        if not idea.has_permission_req(P_ASSOCIATE_EXTRACT):
            raise HTTPForbidden("Cannot associate extact with this idea")
        extract.idea = idea
    else:
        # Explicitly clearing the idea association is allowed.
        extract.idea = None
    Extract.default_db.add(extract)
    # TODO: Merge ranges. Sigh.
    return {'ok': True}
def get_agent(request):
    """Serialize one agent profile as JSON.

    When the requester is looking at their own profile, the preferred
    email address is included in the result.
    """
    agent_id = request.matchdict['id']
    agent = AgentProfile.get_instance(agent_id)
    if not agent:
        raise HTTPNotFound("Agent with id '%s' not found." % agent_id)
    view_def = request.GET.get('view') or 'default'
    discussion_id = int(request.matchdict['discussion_id'])
    user_id = authenticated_userid(request) or Everyone
    permissions = get_permissions(user_id, discussion_id)
    result = agent.generic_json(view_def, user_id, permissions)
    if user_id == agent.id:
        # We probably should add all profile info here.
        result['preferred_email'] = agent.get_preferred_email()
    return result
def get_agent(request):
    """Return the JSON view of a single agent profile.

    The requester's own profile additionally carries
    ``preferred_email``.
    """
    agent_id = request.matchdict['id']
    agent = AgentProfile.get_instance(agent_id)
    if not agent:
        raise HTTPNotFound("Agent with id '%s' not found." % agent_id)
    discussion_id = int(request.matchdict['discussion_id'])
    requester_id = request.authenticated_userid or Everyone
    permissions = get_permissions(requester_id, discussion_id)
    view_def = request.GET.get('view') or 'default'
    serialized = agent.generic_json(view_def, requester_id, permissions)
    if requester_id == agent.id:
        # We probably should add all profile info here.
        serialized['preferred_email'] = agent.get_preferred_email()
    return serialized
def participants_private_as_graph(self, discussion_id):
    """Build an RDF graph of participants' private user data.

    Reads profile data from the private-user Virtuoso storage and adds
    the account subjects that the main discussion storage links (via
    sioc:account_of) to those participants.

    :param discussion_id: database id of the Discussion
    :return: rdflib ConjunctiveGraph over the private user graph IRI
    """
    from assembl.models import Discussion, AgentProfile
    local_uri = self.local_uri()
    discussion = Discussion.get(discussion_id)
    d_storage_name = self.private_user_storage.name
    d_graph_iri = self.private_user_storage.sections[0].graph_iri
    cg = ConjunctiveGraph(identifier=d_graph_iri)
    v = get_virtuoso(self.session, d_storage_name)
    v_main = get_virtuoso(self.session, self.discussion_storage_name())
    participant_ids = discussion.get_participants(True)
    profiles = {URIRef(AgentProfile.uri_generic(id, local_uri))
                for id in participant_ids}
    self.add_subject_data(v, cg, profiles)
    # Accounts attached to those profiles in the main (public) storage.
    accounts = [account for ((account, p, profile), g)
                in v_main.triples((None, SIOC.account_of, None))
                if profile in profiles]
    self.add_subject_data(v, cg, accounts)
    return cg
def participants_private_as_graph(self, discussion_id):
    """Export the private profile data of a discussion's participants.

    Subject data comes from the private-user storage; the related
    account subjects are discovered through sioc:account_of triples in
    the main discussion storage.

    :param discussion_id: database id of the Discussion
    :return: rdflib ConjunctiveGraph over the private user graph IRI
    """
    from assembl.models import Discussion, AgentProfile
    local_uri = self.local_uri()
    discussion = Discussion.get(discussion_id)
    d_storage_name = self.private_user_storage.name
    d_graph_iri = self.private_user_storage.sections[0].graph_iri
    cg = ConjunctiveGraph(identifier=d_graph_iri)
    v = get_virtuoso(self.session, d_storage_name)
    v_main = get_virtuoso(self.session, self.discussion_storage_name())
    participant_ids = discussion.get_participants(True)
    profiles = {URIRef(AgentProfile.uri_generic(id, local_uri))
                for id in participant_ids}
    self.add_subject_data(v, cg, profiles)
    accounts = [account for ((account, p, profile), g)
                in v_main.triples((None, SIOC.account_of, None))
                if profile in profiles]
    self.add_subject_data(v, cg, accounts)
    return cg
def reset_password(request):
    """Send a password-reset email for a user.

    The user is located either by ``user_id`` or by ``identifier``
    (email or login) from the JSON body; an optional
    ``discussion_slug`` scopes the email to a discussion.

    :raises JSONError: when no identifier is given, the user/email does
        not exist, the user has no email, or the profile is not a User
    :return: HTTPOk on success
    """
    identifier = request.json_body.get('identifier')
    user_id = request.json_body.get('user_id')
    slug = request.json_body.get('discussion_slug')
    discussion = None
    if slug:
        discussion = Discussion.default_db.query(
            Discussion).filter_by(slug=slug).first()
    email = None
    user = None
    localizer = request.localizer
    if user_id:
        user = AgentProfile.get(int(user_id))
        if not user:
            raise JSONError(
                localizer.translate(_("The user does not exist")),
                code=HTTPNotFound.code)
        if identifier:
            # Use the given identifier only if it is one of the user's
            # own account emails.
            for account in user.accounts:
                if identifier == account.email:
                    email = identifier
                    break
    elif identifier:
        user, account = from_identifier(identifier)
        if not user:
            raise JSONError(
                localizer.translate(_("This email does not exist")),
                code=HTTPNotFound.code)
        if account:
            email = account.email
    else:
        error = localizer.translate(_("Please give an identifier"))
        raise JSONError(error)
    if not email:
        email = user.get_preferred_email()
    if not email:
        error = localizer.translate(_("This user has no email"))
        raise JSONError(error, code=HTTPPreconditionFailed.code)
    if not isinstance(user, User):
        error = localizer.translate(_("This is not a user"))
        raise JSONError(error, code=HTTPPreconditionFailed.code)
    send_change_password_email(request, user, email, discussion=discussion)
    return HTTPOk()
def autocomplete(request):
    """Search agent profiles by name for autocompletion.

    Requires a ``q`` GET parameter with the search terms; results can be
    restricted to the current discussion's participants and are capped
    by the ``limit`` parameter (default 20).

    :raises HTTPBadRequest: when ``q`` is missing
    :return: dict with a ``results`` list of ``{'id', 'text'}`` entries
    """
    keywords = request.GET.get('q')
    if not keywords:
        raise HTTPBadRequest("please specify search terms (q)")
    discussion = request.context.get_instance_of_class(Discussion)
    query = Discussion.default_db.query(AgentProfile.id, AgentProfile.name)
    if discussion:
        query = query.filter(AgentProfile.id.in_(
            discussion.get_participants_query(
                True, True, request.authenticated_userid
            ).subquery()))
    limit = int(request.GET.get('limit') or 20)
    query, rank = add_simple_text_search(
        query, [AgentProfile.name], keywords.split())
    # BUG FIX: removed a leftover debug print() of the result set; also
    # stop reusing ``query`` for the materialized rows.
    rows = query.order_by(rank.desc()).limit(limit).all()
    return {'results': [{
        'id': AgentProfile.uri_generic(id), 'text': name}
        for (id, name, rank) in rows]}
def _get_agents_real(discussion, user_id=Everyone, view_def=None):
    """Serialize agent profiles for a discussion request.

    Admins (P_ADMIN_DISC / P_SYSADMIN) get every agent's preferred
    email; other users only get their own.

    :param discussion: the Discussion (used for permission lookup)
    :param user_id: id of the requesting user, defaults to Everyone
    :param view_def: optional view definition name for serialization
    :return: list of serialized agents (entries may be None when a
        view yields nothing)
    """
    # NOTE(review): this queries ALL agent profiles, not just the
    # discussion's participants — see the TODO below.
    agents = AgentProfile.db().query(AgentProfile)
    permissions = get_permissions(user_id, discussion.id)
    include_emails = P_ADMIN_DISC in permissions or P_SYSADMIN in permissions
    if include_emails:
        # Eager-load accounts since emails will be read per agent.
        agents = agents.options(joinedload(AgentProfile.accounts))
    # TODO: Only those in the discussion...
    # look at permissions, posts, extracts... argh!
    def view(agent):
        # Serialize one agent; returns None when the view yields nothing.
        if view_def:
            result = agent.generic_json(view_def, user_id, permissions)
        else:
            result = agent.serializable()
        if result is None:
            return
        if include_emails or agent.id == user_id:
            result['preferred_email'] = agent.get_preferred_email()
        return result
    return [view(agent) for agent in agents if agent is not None]
def password_change_sent(request):
    """Render the password-change confirmation page (simple variant).

    Sends the change-password email on the first hit (no ``sent``
    param), then returns the confirmation page context.

    :raises HTTPNotFound: when the profile id does not exist
    """
    localizer = request.localizer
    profile_id = int(request.matchdict.get('profile_id'))
    if not request.params.get('sent', False):
        profile = AgentProfile.get(profile_id)
        if not profile:
            # BUG FIX: was ``"No profile " + profile_id`` which raised
            # TypeError (str + int) instead of the intended 404.
            raise HTTPNotFound("No profile " + str(profile_id))
        send_change_password_email(request, profile,
                                   request.params.get('email', None))
    slug = request.matchdict.get('discussion_slug', None)
    slug_prefix = "/" + slug if slug else ""
    return dict(
        get_default_context(request),
        # Reuse the already-parsed id instead of re-reading the matchdict.
        profile_id=profile_id,
        slug_prefix=slug_prefix,
        error=request.params.get('error'),
        title=localizer.translate(_('Password change requested')),
        description=localizer.translate(
            _('We have sent you an email with a temporary connection link. '
              'Please use that link to log in and change your password.')))
def participant_autocomplete(request):
    """Autocomplete participants of the current discussion by name.

    Short queries (< 6 chars) do a fuzzy ILIKE match re-ranked with
    Jaro-Winkler similarity; longer queries use full-text search.

    :raises HTTPBadRequest: when ``q`` is missing, ``limit`` is not an
        integer, or ``limit`` exceeds 100
    :return: dict with a ``results`` list of ``{'id', 'text'}`` entries
    """
    ctx = request.context
    keyword = request.GET.get('q')
    if not keyword:
        raise HTTPBadRequest("please specify search terms (q)")
    limit = request.GET.get('limit', 20)
    try:
        limit = int(limit)
    except (ValueError, TypeError):
        # BUG FIX: was a bare ``except:`` which also swallowed
        # KeyboardInterrupt/SystemExit; only conversion errors belong here.
        raise HTTPBadRequest("limit must be an integer")
    if limit > 100:
        raise HTTPBadRequest("be reasonable")
    # Unverified User rows are excluded; bare AgentProfiles (no User row)
    # are kept via the outer join's NULL id.
    query = AgentProfile.default_db.query(
        AgentProfile.id, AgentProfile.name, User.username
        ).outerjoin(User).filter((User.verified == True) | (User.id == None))
    discussion = ctx.get_instance_of_class(Discussion)
    if discussion:
        query = query.filter(AgentProfile.id.in_(
            discussion.get_participants_query(True, True).subquery()))
    if len(keyword) < 6:
        # Fuzzy path: match any string containing the keyword's characters
        # in order, then re-rank by Jaro-Winkler similarity.
        query = query.add_column(literal(0))
        matchstr = '%'.join(keyword)
        matchstr = '%'.join(('', matchstr, ''))
        agents = query.filter(AgentProfile.name.ilike(matchstr) |
                              User.username.ilike(matchstr)
                              ).limit(limit * 5).all()
        agents.sort(key=lambda u: max(
            jaro_winkler(u[1], keyword),
            jaro_winkler(u[2], keyword) if u[2] else 0
            ), reverse=True)
        agents = agents[:limit]
    else:
        # Full-text path for longer queries.
        query, rank = add_simple_text_search(
            query, [AgentProfile.name], keyword.split())
        agents = query.order_by(rank.desc()).limit(limit).all()
    return {'results': [{
        'id': AgentProfile.uri_generic(id), 'text': name}
        for (id, name, username, rank) in agents]}
def _get_agents_real(discussion, user_id=None, view_def='default'):
    """Serialize every participant of ``discussion``.

    Admins — and each agent viewing themselves — additionally get the
    preferred email; agents with posts get a ``post_count`` field.
    """
    user_id = user_id or Everyone
    participants = discussion.get_participants_query()
    permissions = get_permissions(user_id, discussion.id)
    can_see_emails = (P_ADMIN_DISC in permissions or
                      P_SYSADMIN in permissions)
    if can_see_emails:
        # Emails will be read per agent; eager-load the accounts.
        participants = participants.options(
            joinedload(AgentProfile.accounts))
    counts = AgentProfile.count_posts_in_discussion_all_profiles(discussion)

    def serialize(agent):
        data = agent.generic_json(view_def, user_id, permissions)
        if data is None:
            return None
        if can_see_emails or agent.id == user_id:
            data['preferred_email'] = agent.get_preferred_email()
        n_posts = counts.get(agent.id, 0)
        if n_posts:
            data['post_count'] = n_posts
        return data

    return [serialize(agent) for agent in participants if agent is not None]
def _get_agents_real(discussion, user_id=None, view_def='default'):
    """Serialize the participants of a discussion.

    Admins (P_ADMIN_DISC / P_SYSADMIN) get every agent's preferred
    email; other users only get their own.  Agents with posts in the
    discussion also get a ``post_count`` entry.

    :param discussion: the Discussion whose participants are listed
    :param user_id: requesting user's id; defaults to Everyone
    :param view_def: view definition name used for serialization
    :return: list of serialized agents (entries may be None when the
        view yields nothing for an agent)
    """
    user_id = user_id or Everyone
    agents = discussion.get_participants_query()
    permissions = get_permissions(user_id, discussion.id)
    include_emails = P_ADMIN_DISC in permissions or P_SYSADMIN in permissions
    if include_emails:
        # Emails will be read per agent; eager-load the accounts.
        agents = agents.options(joinedload(AgentProfile.accounts))
    num_posts_per_user = \
        AgentProfile.count_posts_in_discussion_all_profiles(discussion)
    def view(agent):
        # Serialize one agent; returns None when the view yields nothing.
        result = agent.generic_json(view_def, user_id, permissions)
        if result is None:
            return
        if include_emails or agent.id == user_id:
            result['preferred_email'] = agent.get_preferred_email()
        post_count = num_posts_per_user.get(agent.id, 0)
        if post_count:
            result['post_count'] = post_count
        return result
    return [view(agent) for agent in agents if agent is not None]
def home_view(request):
    """Main discussion page.

    Handles access control (redirect anonymous users to login, 401 for
    logged-in users without read access), resolves requested post/idea
    for meta tags, records the visit, negotiates the user's locale, and
    renders the index template with caching disabled.
    """
    user_id = authenticated_userid(request) or Everyone
    context = get_default_context(request)
    discussion = context["discussion"]
    request.session["discussion"] = discussion.slug
    canRead = user_has_permission(discussion.id, user_id, P_READ)
    if not canRead and user_id == Everyone:
        # User isn't logged-in and discussion isn't public:
        # redirect to login page
        # need to pass the route to go to *after* login as well
        # With regards to a next_view, if explicitly stated, then
        # that is the next view. If not stated, the referer takes
        # precedence. In case of failure, login redirects to the
        # discussion which is its context.
        next_view = request.params.get('next_view', None)
        if not next_view and discussion:
            # If referred here from a post url, want to be able to
            # send the user back. Usually, Assembl will send the user
            # here to login on private discussions.
            referrer = request.url
            next_view = path_qs(referrer)
        if next_view:
            login_url = request.route_url(
                "contextual_login", discussion_slug=discussion.slug,
                _query={"next_view": next_view})
        else:
            login_url = request.route_url(
                'contextual_login', discussion_slug=discussion.slug)
        return HTTPSeeOther(login_url)
    elif not canRead:
        # User is logged-in but doesn't have access to the discussion
        # Would use render_to_response, except for the 401
        from pyramid_jinja2 import IJinja2Environment
        jinja_env = request.registry.queryUtility(
            IJinja2Environment, name='.jinja2')
        template = jinja_env.get_template('cannot_read_discussion.jinja2')
        body = template.render(get_default_context(request))
        return Response(body, 401)
    # if the route asks for a post, get post content (because this is
    # needed for meta tags)
    route_name = request.matched_route.name
    if route_name == "purl_posts":
        post_id = FrontendUrls.getRequestedPostId(request)
        if not post_id:
            return HTTPSeeOther(request.route_url(
                'home', discussion_slug=discussion.slug))
        post = Post.get_instance(post_id)
        if not post or post.discussion_id != discussion.id:
            # Unknown or foreign post: fall back to the discussion home.
            return HTTPSeeOther(request.route_url(
                'home', discussion_slug=discussion.slug))
        context['post'] = post
    elif route_name == "purl_idea":
        idea_id = FrontendUrls.getRequestedIdeaId(request)
        if not idea_id:
            return HTTPSeeOther(request.route_url(
                'home', discussion_slug=discussion.slug))
        idea = Idea.get_instance(idea_id)
        if not idea or idea.discussion_id != discussion.id:
            # Unknown or foreign idea: fall back to the discussion home.
            return HTTPSeeOther(request.route_url(
                'home', discussion_slug=discussion.slug))
        context['idea'] = idea
    canAddExtract = user_has_permission(
        discussion.id, user_id, P_ADD_EXTRACT)
    context['canAddExtract'] = canAddExtract
    context['canDisplayTabs'] = True
    preferences = discussion.preferences
    if user_id != Everyone:
        from assembl.models import AgentProfile, UserPreferenceCollection
        user = AgentProfile.get(user_id)
        preferences = UserPreferenceCollection(user_id, discussion)
        # TODO: user may not exist. Case of session with BD change.
        user.is_visiting_discussion(discussion.id)
        session = Discussion.default_db
        current_prefs = session.query(UserLanguagePreference).\
            filter_by(user_id=user_id).all()
        # user = session.query(User).filter_by(id=user_id).first()
        # Locale priority: cookie, then request parameter, then the
        # negotiated OS/browser default.
        if '_LOCALE_' in request.cookies:
            locale = request.cookies['_LOCALE_']
            process_locale(locale, user_id, current_prefs, session,
                           LanguagePreferenceOrder.Cookie)
        elif '_LOCALE_' in request.params:
            locale = request.params['_LOCALE_']
            process_locale(locale, user_id, current_prefs, session,
                           LanguagePreferenceOrder.Parameter)
        else:
            locale = locale_negotiator(request)
            process_locale(locale, user_id, current_prefs, session,
                           LanguagePreferenceOrder.OS_Default)
    else:
        locale = request.localizer.locale_name
    try:
        service = discussion.translation_service()
        locale_labels = json.dumps(
            service.target_locale_labels(
                Locale.get_or_create(locale, discussion.db))
            if service else {})
    # NOTE(review): bare except deliberately degrades to no labels when
    # the translation service fails, but it also hides real errors —
    # consider narrowing to Exception and logging.
    except:
        locale_labels = '{}'
    context['translation_locale_names_json'] = locale_labels
    context['preferences_json'] = json.dumps(dict(preferences))
    response = render_to_response('../../templates/index.jinja2',
                                  context, request=request)
    # Prevent caching the home, especially for proper login/logout
    response.cache_control.max_age = 0
    response.cache_control.prevent_auto = True
    return response
def get_data(content):
    """Return uid, dict of fields we want to index, return None if we
    don't index.

    Builds the search-index document for an Idea, AgentProfile, Post
    (including SynthesisPost) or Extract.  Returns ``(None, None)`` for
    anything else, or for posts/extracts with no associated idea.
    """
    from assembl.models import Idea, Post, SynthesisPost, AgentProfile, LangString, Extract, Question
    if type(content) == Idea:
        # only index Idea, not Thematic or Question (hence the exact
        # type() check rather than isinstance()).
        data = {}
        for attr in ('creation_date', 'id', 'discussion_id'):
            data[attr] = getattr(content, attr)
        populate_from_langstring_prop(content, data, 'title')
        populate_from_langstring_prop(content, data, 'synthesis_title')
        populate_from_langstring_prop(content, data, 'description')
        announcement = content.get_applicable_announcement()
        if announcement:
            populate_from_langstring_prop(
                announcement, data, 'title', 'announcement_title')
            populate_from_langstring_prop(
                announcement, data, 'body', 'announcement_body')
        phase = content.get_associated_phase()
        if phase:
            data['phase_id'] = phase.id
            data['phase_identifier'] = phase.identifier
        data['message_view_override'] = content.message_view_override
        return get_uid(content), data
    elif isinstance(content, AgentProfile):
        data = {}
        for attr in ('creation_date', 'id', 'name'):
            # AgentProfile doesn't have creation_date, User does.
            data[attr] = getattr(content, attr, None)
        # get all discussions that the user is in via AgentStatusInDiscussion
        data['discussion_id'] = set(
            [s.discussion_id for s in content.agent_status_in_discussion])
        # get discussion_id for all posts of this agent
        data['discussion_id'] = list(
            data['discussion_id'].union(
                [post.discussion_id for post in content.posts_created]
            )
        )
        return get_uid(content), data
    elif isinstance(content, Post):
        data = {}
        # Parent document for the ES parent/child mapping.
        data['_parent'] = 'user:{}'.format(content.creator_id)
        if content.parent_id is not None:
            data['parent_creator_id'] = content.parent.creator_id
        for attr in ('discussion_id', 'creation_date', 'id', 'parent_id',
                     'creator_id', 'sentiment_counts'):
            data[attr] = getattr(content, attr)
        data['creator_display_name'] = AgentProfile.get(
            content.creator_id).display_name()
        data['sentiment_tags'] = [key for key in data['sentiment_counts']
                                  if data['sentiment_counts'][key] > 0]
        like = data['sentiment_counts']['like']
        disagree = data['sentiment_counts']['disagree']
        dont_understand = data['sentiment_counts']['dont_understand']
        more_info = data['sentiment_counts']['more_info']
        all_sentiments = [like, disagree, dont_understand, more_info]
        data['sentiment_counts']['total'] = sum(all_sentiments)
        data['sentiment_counts']['popularity'] = like - disagree
        # NOTE(review): with Python-2 integer division (see iteritems
        # below, this module is py2) these ratios are truncated —
        # confirm whether float ratios were intended.
        data['sentiment_counts']['consensus'] = max(all_sentiments) / (
            (sum(all_sentiments) / len(all_sentiments)) or 1)
        data['sentiment_counts']['controversy'] = max(like, disagree, 1) / min(
            like or 1, disagree or 1)
        # this is the subtype (assembl_post, email...)
        data['type'] = content.type
        # data['publishes_synthesis_id'] = getattr(
        #     content, 'publishes_synthesis_id', None)
        phase = content.get_created_phase()
        if phase:
            data['phase_id'] = phase.id
            data['phase_identifier'] = phase.identifier
        if isinstance(content, SynthesisPost):
            populate_from_langstring_prop(
                content.publishes_synthesis, data, 'subject')
            populate_from_langstring_prop(
                content.publishes_synthesis, data, 'introduction')
            populate_from_langstring_prop(
                content.publishes_synthesis, data, 'conclusion')
            # Concatenate the synthesis titles of all covered ideas,
            # grouped per base locale.
            long_titles = [idea.synthesis_title
                           for idea in content.publishes_synthesis.ideas
                           if idea.synthesis_title]
            long_titles_c = defaultdict(list)
            for ls in long_titles:
                for e in ls.entries:
                    if e.value:
                        long_titles_c[strip_country(
                            e.base_locale)].append(e.value)
            ls = LangString()
            # NOTE: iteritems() is Python-2 only.
            for locale, values in long_titles_c.iteritems():
                ls.add_value(' '.join(values), locale)
            populate_from_langstring(ls, data, 'ideas')
        else:
            idea_id = get_idea_id_for_post(content)
            if not idea_id:
                return None, None
            data['idea_id'] = idea_id
            related_idea = Idea.get(idea_id[0])
            data['message_view_override'] = related_idea.message_view_override
            if isinstance(related_idea, Question):
                related_idea = related_idea.parents[0]
            # we take the title of the first idea in the list for now
            # (in v2, posts are attached to only one idea)
            populate_from_langstring_prop(
                related_idea, data, 'title', 'idea_title')
        populate_from_langstring_prop(content, data, 'body')
        populate_from_langstring_prop(content, data, 'subject')
        return get_uid(content), data
    elif isinstance(content, Extract):
        data = {}
        for attr in ('discussion_id', 'body', 'creation_date', 'id',
                     'creator_id'):
            data[attr] = getattr(content, attr)
        data['post_id'] = content.content_id
        post = Post.get(content.content_id)
        populate_from_langstring_prop(post, data, 'subject')
        phase = post.get_created_phase()
        if phase:
            data['phase_id'] = phase.id
            data['phase_identifier'] = phase.identifier
        idea_id = get_idea_id_for_post(post)
        if not idea_id:
            return None, None
        data['idea_id'] = idea_id
        # we take the title of the first idea in the list for now
        # (in v2, posts are attached to only one idea)
        related_idea = Idea.get(idea_id[0])
        data['message_view_override'] = related_idea.message_view_override
        if isinstance(related_idea, Question):
            related_idea = related_idea.parents[0]
        populate_from_langstring_prop(
            related_idea, data, 'title', 'idea_title')
        data['extract_state'] = 'taxonomy_state.' + content.extract_state
        if content.extract_nature:
            data['extract_nature'] = 'taxonomy_nature.' + content.extract_nature.name
        if content.extract_action:
            data['extract_action'] = 'taxonomy_action.' + content.extract_action.name
        data['creator_display_name'] = AgentProfile.get(
            content.creator_id).display_name()
        return get_uid(content), data
    return None, None
def get_posts(request):
    """Query interface on posts.

    Filters have two forms:
    - only_*: filters that cannot be reversed (ex: only_synthesis, only_orphan)
    - is_*: filters that can be reversed (ex: is_unread=true returns only
      unread messages, is_unread=false returns only read messages)

    Query parameters:
    - order: chronological, reverse_chronological, score or popularity
    - root_post_id: all posts below the one specified
    - family_post_id: all posts below the one specified, and all its ancestors
    - post_replies_to: replies to posts by the given author
    - root_idea_id: all posts associated with the given idea
    - ids: explicit message ids
    - posted_after_date, posted_before_date: date selection (ISO format)
    - post_author: filter by author
    - classifier: filter on message_classifier, or absence thereof
      (classifier=null). Can be negated with "!".

    Returns a dict with pagination metadata ("page", "unread", "total",
    "maxPage", "startIndex", "endIndex") and the serialized "posts".
    """
    localizer = request.localizer
    discussion_id = int(request.matchdict['discussion_id'])
    discussion = Discussion.get(int(discussion_id))
    if not discussion:
        raise HTTPNotFound(localizer.translate(
            _("No discussion found with id=%s")) % discussion_id)
    # Pull any new content from external sources before querying.
    discussion.import_from_sources()
    user_id = request.authenticated_userid or Everyone
    permissions = get_permissions(user_id, discussion_id)
    DEFAULT_PAGE_SIZE = 25
    page_size = DEFAULT_PAGE_SIZE
    # NOTE(review): filter_names is parsed but never used below — confirm
    # whether the 'filters' query param is still honored anywhere.
    filter_names = [
        filter_name for filter_name
        in request.GET.getone('filters').split(',')
        if filter_name
    ] if request.GET.get('filters') else []
    try:
        page = int(request.GET.getone('page'))
    except (ValueError, KeyError):
        page = 1
    text_search = request.GET.get('text_search', None)
    order = request.GET.get('order')
    if order is None:
        order = 'chronological'
    assert order in ('chronological', 'reverse_chronological',
                     'score', 'popularity')
    if order == 'score':
        # Score ordering is only meaningful relative to a text search.
        assert text_search is not None
    if page < 1:
        page = 1
    root_post_id = request.GET.getall('root_post_id')
    if root_post_id:
        root_post_id = get_database_id("Post", root_post_id[0])
    family_post_id = request.GET.getall('family_post_id')
    if family_post_id:
        family_post_id = get_database_id("Post", family_post_id[0])
    root_idea_id = request.GET.getall('root_idea_id')
    if root_idea_id:
        root_idea_id = get_database_id("Idea", root_idea_id[0])
    ids = request.GET.getall('ids[]')
    if ids:
        ids = [get_database_id("Post", id) for id in ids]
    view_def = request.GET.get('view') or 'default'
    only_synthesis = request.GET.get('only_synthesis')
    post_author_id = request.GET.get('post_author')
    if post_author_id:
        post_author_id = get_database_id("AgentProfile", post_author_id)
        assert AgentProfile.get(post_author_id), "Unable to find agent profile with id " + post_author_id
    post_replies_to = request.GET.get('post_replies_to')
    if post_replies_to:
        post_replies_to = get_database_id("AgentProfile", post_replies_to)
        assert AgentProfile.get(post_replies_to), "Unable to find agent profile with id " + post_replies_to
    posted_after_date = request.GET.get('posted_after_date')
    posted_before_date = request.GET.get('posted_before_date')
    message_classifiers = request.GET.getall('classifier')
    # Restrict the query to synthesis posts only when requested.
    PostClass = SynthesisPost if only_synthesis == "true" else Post
    if order == 'score':
        # Also select the full-text-search score column.
        posts = discussion.db.query(PostClass, Content.body_text_index.score_name)
    else:
        posts = discussion.db.query(PostClass)
    posts = posts.filter(
        PostClass.discussion_id == discussion_id,
    ).filter(PostClass.type != 'proposition_post')
    ##no_of_posts_to_discussion = posts.count()
    post_data = []
    # 'deleted' query param: True means deleted posts only, False (default)
    # means non-deleted only, None means both. Several earlier iterations of
    # this parsing logic exist in VCS history.
    deleted = request.GET.get('deleted', None)
    if deleted is None:
        if not ids:
            deleted = False
        else:
            # When explicit ids are requested, return them even if deleted.
            deleted = None
    elif deleted.lower() == "any":
        deleted = None
    else:
        deleted = asbool(deleted)
    only_orphan = asbool(request.GET.get('only_orphan', False))
    if only_orphan:
        if root_idea_id:
            raise HTTPBadRequest(localizer.translate(
                _("Getting orphan posts of a specific idea isn't supported.")))
        orphans = Idea._get_orphan_posts_statement(
            discussion_id, True, include_deleted=deleted).subquery("orphans")
        posts = posts.join(orphans, PostClass.id == orphans.c.post_id)
    if root_idea_id:
        related = Idea.get_related_posts_query_c(
            discussion_id, root_idea_id, True, include_deleted=deleted)
        posts = posts.join(related, PostClass.id == related.c.post_id)
    elif not only_orphan:
        # The idea/orphan subqueries already constrain deletion state;
        # otherwise apply it directly here.
        if deleted is not None:
            if deleted:
                posts = posts.filter(
                    PostClass.publication_state.in_(
                        deleted_publication_states))
            else:
                posts = posts.filter(
                    PostClass.tombstone_date == None)
    if root_post_id:
        root_post = Post.get(root_post_id)
        # Posts whose materialized ancestry path passes through root_post,
        # plus root_post itself.
        posts = posts.filter(
            (Post.ancestry.like(
                root_post.ancestry + cast(root_post.id, String) + ',%'
            )) |
            (PostClass.id == root_post.id)
        )
    elif family_post_id:
        root_post = Post.get(family_post_id)
        ancestor_ids = root_post.ancestor_ids()
        # Like root_post_id, but also include the post's own ancestors.
        posts = posts.filter(
            (Post.ancestry.like(
                root_post.ancestry + cast(root_post.id, String) + ',%'
            )) |
            (PostClass.id == root_post.id) |
            (PostClass.id.in_(ancestor_ids))
        )
    else:
        root_post = None
    if ids:
        posts = posts.filter(Post.id.in_(ids))
    if posted_after_date:
        posted_after_date = parse_datetime(posted_after_date)
        if posted_after_date:
            posts = posts.filter(PostClass.creation_date >= posted_after_date)
        # Maybe we should do something if the date is invalid. benoitg
    if posted_before_date:
        posted_before_date = parse_datetime(posted_before_date)
        if posted_before_date:
            posts = posts.filter(PostClass.creation_date <= posted_before_date)
        # Maybe we should do something if the date is invalid. benoitg
    if post_author_id:
        posts = posts.filter(PostClass.creator_id == post_author_id)
    if message_classifiers:
        if any([len(classifier) == 0 for classifier in message_classifiers]):
            # An empty classifier value matches nothing.
            return {'total': 0, 'posts': []}
        # A leading "!" negates a classifier; mixing polarities is rejected.
        polarities = [classifier[0] != "!"
                      for classifier in message_classifiers]
        polarity = all(polarities)
        if not polarity:
            message_classifiers = [c.strip("!") for c in message_classifiers]
        if polarity != any(polarities):
            raise HTTPBadRequest(_("Do not combine negative and positive classifiers"))
        # Treat null as no classifier
        includes_null = 'null' in message_classifiers
        if includes_null:
            message_classifiers_nonull = filter(lambda c: c != "null", message_classifiers)
        if polarity:
            if len(message_classifiers) == 1:
                term = PostClass.message_classifier == (
                    None if includes_null else message_classifiers[0])
            else:
                term = PostClass.message_classifier.in_(message_classifiers_nonull)
                if includes_null:
                    term = term | (PostClass.message_classifier == None)
        else:
            if len(message_classifiers) == 1:
                term = PostClass.message_classifier != (
                    None if includes_null else message_classifiers[0])
            else:
                term = PostClass.message_classifier.notin_(message_classifiers_nonull)
                if not includes_null:
                    # Exclusion should still let unclassified posts through.
                    term = term | (PostClass.message_classifier == None)
        posts = posts.filter(term)
    if post_replies_to:
        parent_alias = aliased(PostClass)
        posts = posts.join(parent_alias, PostClass.parent)
        posts = posts.filter(parent_alias.creator_id == post_replies_to)
    # Post read/unread management
    is_unread = request.GET.get('is_unread')
    translations = None
    if user_id != Everyone:
        # This is horrible, but the join creates complex subqueries that
        # virtuoso cannot decode properly.
        read_posts = {v.post_id for v in discussion.db.query(
            ViewPost).filter(
            ViewPost.tombstone_condition(),
            ViewPost.actor_id == user_id,
            *ViewPost.get_discussion_conditions(discussion_id))}
        my_sentiments = {l.post_id: l for l in discussion.db.query(
            SentimentOfPost).filter(
            SentimentOfPost.tombstone_condition(),
            SentimentOfPost.actor_id == user_id,
            *SentimentOfPost.get_discussion_conditions(discussion_id))}
        if is_unread != None:
            posts = posts.outerjoin(
                ViewPost, and_(
                    ViewPost.actor_id == user_id,
                    ViewPost.post_id == PostClass.id,
                    ViewPost.tombstone_date == None))
            if is_unread == "true":
                posts = posts.filter(ViewPost.id == None)
            elif is_unread == "false":
                posts = posts.filter(ViewPost.id != None)
        user = AgentProfile.get(user_id)
        service = discussion.translation_service()
        if service.canTranslate is not None:
            translations = PrefCollectionTranslationTable(
                service, LanguagePreferenceCollection.getCurrent(request))
    else:
        # If there is no user_id, all posts are always unread
        my_sentiments = {}
        if is_unread == "false":
            raise HTTPBadRequest(localizer.translate(
                _("You must be logged in to view which posts are read")))
    if text_search is not None:
        # another Virtuoso bug: offband kills score. but it helps speed.
        offband = () if (order == 'score') else None
        posts = posts.filter(Post.body_text_index.contains(
            text_search.encode('utf-8'), offband=offband))
    # posts = posts.options(contains_eager(Post.source))
    # Horrible hack... But useful for structure load
    if view_def == 'id_only':
        pass  # posts = posts.options(defer(Post.body))
    else:
        ideaContentLinkQuery = posts.with_entities(
            PostClass.id, PostClass.idea_content_links_above_post)
        ideaContentLinkCache = dict(ideaContentLinkQuery.all())
    # Note: we could count the like the same way and kill the subquery.
    # But it interferes with the popularity order,
    # and the benefit is not that high.
    sentiment_counts = discussion.db.query(
        PostClass.id, SentimentOfPost.type, count(SentimentOfPost.id)
    ).join(SentimentOfPost
    ).filter(PostClass.id.in_(posts.with_entities(PostClass.id).subquery()),
             SentimentOfPost.tombstone_condition()
    ).group_by(PostClass.id, SentimentOfPost.type)
    sentiment_counts_by_post_id = defaultdict(dict)
    for (post_id, sentiment_type, sentiment_count) in sentiment_counts:
        # Strip the type prefix to key on the bare sentiment name.
        sentiment_counts_by_post_id[post_id][
            sentiment_type[SentimentOfPost.TYPE_PREFIX_LEN:]
        ] = sentiment_count
    # Eager-load relations used during serialization below.
    posts = posts.options(
        # undefer(Post.idea_content_links_above_post),
        joinedload_all(Post.creator),
        joinedload_all(Post.extracts),
        joinedload_all(Post.widget_idea_links),
        joinedload_all(SynthesisPost.publishes_synthesis),
        subqueryload_all(Post.attachments))
    if len(discussion.discussion_locales) > 1:
        posts = posts.options(*Content.subqueryload_options())
    else:
        posts = posts.options(*Content.joinedload_options())
    if order == 'chronological':
        posts = posts.order_by(Content.creation_date)
    elif order == 'reverse_chronological':
        posts = posts.order_by(Content.creation_date.desc())
    elif order == 'score':
        posts = posts.order_by(Content.body_text_index.score_name.desc())
    elif order == 'popularity':
        # assume reverse chronological otherwise
        posts = posts.order_by(Content.disagree_count - Content.like_count,
                               Content.creation_date.desc())
    else:
        posts = posts.order_by(Content.id)
    # print str(posts)
    no_of_posts = 0
    no_of_posts_viewed_by_user = 0
    if deleted is True:
        # We just got deleted posts, now we want their ancestors for context
        post_ids = set()
        ancestor_ids = set()

        def add_ancestors(post):
            # Collect the post id and every id on its ancestry path.
            post_ids.add(post.id)
            ancestor_ids.update(
                [int(x) for x in post.ancestry.strip(",").split(",") if x])
        posts = list(posts)
        for post in posts:
            add_ancestors(post)
        ancestor_ids -= post_ids
        if ancestor_ids:
            ancestors = discussion.db.query(
                PostClass).filter(PostClass.id.in_(ancestor_ids))
            if view_def == 'id_only':
                pass  # ancestors = ancestors.options(defer(Post.body))
            else:
                ancestors = ancestors.options(
                    # undefer(Post.idea_content_links_above_post),
                    joinedload_all(Post.creator),
                    joinedload_all(Post.extracts),
                    joinedload_all(Post.widget_idea_links),
                    joinedload_all(SynthesisPost.publishes_synthesis),
                    subqueryload_all(Post.attachments))
                if len(discussion.discussion_locales) > 1:
                    ancestors = ancestors.options(
                        *Content.subqueryload_options())
                else:
                    ancestors = ancestors.options(
                        *Content.joinedload_options())
            posts.extend(ancestors.all())
    for query_result in posts:
        score, viewpost = None, None
        if not isinstance(query_result, (list, tuple)):
            # Without a score column the query yields bare Post objects.
            query_result = [query_result]
        post = query_result[0]
        if deleted is True:
            add_ancestors(post)
        if user_id != Everyone:
            viewpost = post.id in read_posts
            if view_def != "id_only":
                translate_content(
                    post, translation_table=translations, service=service)
        no_of_posts += 1
        serializable_post = post.generic_json(
            view_def, user_id, permissions) or {}
        if order == 'score':
            score = query_result[1]
            serializable_post['score'] = score
        if viewpost:
            serializable_post['read'] = True
            no_of_posts_viewed_by_user += 1
        elif user_id != Everyone and root_post is not None and root_post.id == post.id:
            # Mark post read, we requested it explicitely
            viewed_post = ViewPost(
                actor_id=user_id,
                post=root_post
            )
            discussion.db.add(viewed_post)
            serializable_post['read'] = True
        else:
            serializable_post['read'] = False
        my_sentiment = my_sentiments.get(post.id, None)
        if my_sentiment is not None:
            my_sentiment = my_sentiment.generic_json('default', user_id, permissions)
        serializable_post['my_sentiment'] = my_sentiment
        if view_def != "id_only":
            serializable_post['indirect_idea_content_links'] = (
                post.indirect_idea_content_links_with_cache(
                    ideaContentLinkCache.get(post.id, None)))
            serializable_post['sentiment_counts'] = sentiment_counts_by_post_id[post.id]
        post_data.append(serializable_post)
    # Benoitg: For now, limiting the query by page completely garbles
    # threading without intelligent handling of pagination, so it stays
    # disabled; counts would need a separate query to remain correct.
    data = {}
    data["page"] = page
    data["unread"] = no_of_posts - no_of_posts_viewed_by_user
    data["total"] = no_of_posts
    data["maxPage"] = max(1, ceil(float(data["total"])/page_size))
    # TODO: Check if we want 1 based index in the api
    data["startIndex"] = (page_size * page) - (page_size-1)
    if data["page"] == data["maxPage"]:
        data["endIndex"] = data["total"]
    else:
        data["endIndex"] = data["startIndex"] + (page_size-1)
    data["posts"] = post_data
    return data
def test_count_posts_in_discussion_all_profiles(test_app, discussion, admin_user, proposals):
    """With the proposals fixture, the admin profile owns 15 posts."""
    from assembl.models import AgentProfile
    counts_by_profile_id = AgentProfile.count_posts_in_discussion_all_profiles(discussion)
    assert counts_by_profile_id[admin_user.id] == 15
def get_posts(request):
    """Query interface on posts (variant using ``authenticated_userid(request)``).

    Filters have two forms:
    - only_*: filters that cannot be reversed (ex: only_synthesis, only_orphan)
    - is_*: filters that can be reversed (ex: is_unread=true returns only
      unread messages, is_unread=false returns only read messages)

    Query parameters:
    - order: chronological, reverse_chronological, score or popularity
    - root_post_id: all posts below the one specified
    - family_post_id: all posts below the one specified, and all its ancestors
    - post_replies_to: replies to posts by the given author
    - root_idea_id: all posts associated with the given idea
    - ids: explicit message ids
    - posted_after_date, posted_before_date: date selection (ISO format)
    - post_author: filter by author
    - classifier: filter on message_classifier, or absence thereof
      (classifier=null). Can be negated with "!".

    Returns a dict with pagination metadata ("page", "unread", "total",
    "maxPage", "startIndex", "endIndex") and the serialized "posts".
    """
    localizer = request.localizer
    discussion_id = int(request.matchdict['discussion_id'])
    discussion = Discussion.get(int(discussion_id))
    if not discussion:
        raise HTTPNotFound(
            localizer.translate(_("No discussion found with id=%s")) % discussion_id)
    # Pull any new content from external sources before querying.
    discussion.import_from_sources()
    user_id = authenticated_userid(request) or Everyone
    permissions = get_permissions(user_id, discussion_id)
    DEFAULT_PAGE_SIZE = 25
    page_size = DEFAULT_PAGE_SIZE
    # NOTE(review): filter_names is parsed but never used below — confirm
    # whether the 'filters' query param is still honored anywhere.
    filter_names = [
        filter_name for filter_name
        in request.GET.getone('filters').split(',')
        if filter_name
    ] if request.GET.get('filters') else []
    try:
        page = int(request.GET.getone('page'))
    except (ValueError, KeyError):
        page = 1
    text_search = request.GET.get('text_search', None)
    order = request.GET.get('order')
    if order is None:
        order = 'chronological'
    assert order in ('chronological', 'reverse_chronological',
                     'score', 'popularity')
    if order == 'score':
        # Score ordering is only meaningful relative to a text search.
        assert text_search is not None
    if page < 1:
        page = 1
    root_post_id = request.GET.getall('root_post_id')
    if root_post_id:
        root_post_id = get_database_id("Post", root_post_id[0])
    family_post_id = request.GET.getall('family_post_id')
    if family_post_id:
        family_post_id = get_database_id("Post", family_post_id[0])
    root_idea_id = request.GET.getall('root_idea_id')
    if root_idea_id:
        root_idea_id = get_database_id("Idea", root_idea_id[0])
    ids = request.GET.getall('ids[]')
    if ids:
        ids = [get_database_id("Post", id) for id in ids]
    view_def = request.GET.get('view') or 'default'
    only_synthesis = request.GET.get('only_synthesis')
    post_author_id = request.GET.get('post_author')
    if post_author_id:
        post_author_id = get_database_id("AgentProfile", post_author_id)
        assert AgentProfile.get(
            post_author_id
        ), "Unable to find agent profile with id " + post_author_id
    post_replies_to = request.GET.get('post_replies_to')
    if post_replies_to:
        post_replies_to = get_database_id("AgentProfile", post_replies_to)
        assert AgentProfile.get(
            post_replies_to
        ), "Unable to find agent profile with id " + post_replies_to
    posted_after_date = request.GET.get('posted_after_date')
    posted_before_date = request.GET.get('posted_before_date')
    message_classifiers = request.GET.getall('classifier')
    # Restrict the query to synthesis posts only when requested.
    PostClass = SynthesisPost if only_synthesis == "true" else Post
    if order == 'score':
        # Also select the full-text-search score column.
        posts = discussion.db.query(PostClass,
                                    Content.body_text_index.score_name)
    else:
        posts = discussion.db.query(PostClass)
    posts = posts.filter(PostClass.discussion_id == discussion_id, )
    ##no_of_posts_to_discussion = posts.count()
    post_data = []
    # 'deleted' query param: True means deleted posts only, False (default)
    # means non-deleted only, None means both. Several earlier iterations of
    # this parsing logic exist in VCS history.
    deleted = request.GET.get('deleted', None)
    if deleted is None:
        if not ids:
            deleted = False
        else:
            # When explicit ids are requested, return them even if deleted.
            deleted = None
    elif deleted.lower() == "any":
        deleted = None
    else:
        deleted = asbool(deleted)
    only_orphan = asbool(request.GET.get('only_orphan', False))
    if only_orphan:
        if root_idea_id:
            raise HTTPBadRequest(
                localizer.translate(
                    _("Getting orphan posts of a specific idea isn't supported.")))
        orphans = Idea._get_orphan_posts_statement(
            discussion_id, True, include_deleted=deleted).subquery("orphans")
        posts = posts.join(orphans, PostClass.id == orphans.c.post_id)
    if root_idea_id:
        related = Idea.get_related_posts_query_c(discussion_id, root_idea_id,
                                                 True, include_deleted=deleted)
        posts = posts.join(related, PostClass.id == related.c.post_id)
    elif not only_orphan:
        # The idea/orphan subqueries already constrain deletion state;
        # otherwise apply it directly here.
        if deleted is not None:
            if deleted:
                posts = posts.filter(
                    PostClass.publication_state.in_(
                        deleted_publication_states))
            else:
                posts = posts.filter(PostClass.tombstone_date == None)
    if root_post_id:
        root_post = Post.get(root_post_id)
        # Posts whose materialized ancestry path passes through root_post,
        # plus root_post itself.
        posts = posts.filter((Post.ancestry.like(
            root_post.ancestry + cast(root_post.id, String) + ',%'))
            | (PostClass.id == root_post.id))
    elif family_post_id:
        root_post = Post.get(family_post_id)
        ancestor_ids = root_post.ancestor_ids()
        # Like root_post_id, but also include the post's own ancestors.
        posts = posts.filter((Post.ancestry.like(
            root_post.ancestry + cast(root_post.id, String) + ',%'))
            | (PostClass.id == root_post.id)
            | (PostClass.id.in_(ancestor_ids)))
    else:
        root_post = None
    if ids:
        posts = posts.filter(Post.id.in_(ids))
    if posted_after_date:
        posted_after_date = parse_datetime(posted_after_date)
        if posted_after_date:
            posts = posts.filter(PostClass.creation_date >= posted_after_date)
        # Maybe we should do something if the date is invalid. benoitg
    if posted_before_date:
        posted_before_date = parse_datetime(posted_before_date)
        if posted_before_date:
            posts = posts.filter(PostClass.creation_date <= posted_before_date)
        # Maybe we should do something if the date is invalid. benoitg
    if post_author_id:
        posts = posts.filter(PostClass.creator_id == post_author_id)
    if message_classifiers:
        if any([len(classifier) == 0 for classifier in message_classifiers]):
            # An empty classifier value matches nothing.
            return {'total': 0, 'posts': []}
        # A leading "!" negates a classifier; mixing polarities is rejected.
        polarities = [
            classifier[0] != "!" for classifier in message_classifiers
        ]
        polarity = all(polarities)
        if not polarity:
            message_classifiers = [c.strip("!") for c in message_classifiers]
        if polarity != any(polarities):
            raise HTTPBadRequest(
                _("Do not combine negative and positive classifiers"))
        # Treat null as no classifier
        includes_null = 'null' in message_classifiers
        if includes_null:
            message_classifiers_nonull = filter(lambda c: c != "null",
                                                message_classifiers)
        if polarity:
            if len(message_classifiers) == 1:
                term = PostClass.message_classifier == (
                    None if includes_null else message_classifiers[0])
            else:
                term = PostClass.message_classifier.in_(
                    message_classifiers_nonull)
                if includes_null:
                    term = term | (PostClass.message_classifier == None)
        else:
            if len(message_classifiers) == 1:
                term = PostClass.message_classifier != (
                    None if includes_null else message_classifiers[0])
            else:
                term = PostClass.message_classifier.notin_(
                    message_classifiers_nonull)
                if not includes_null:
                    # Exclusion should still let unclassified posts through.
                    term = term | (PostClass.message_classifier == None)
        posts = posts.filter(term)
    if post_replies_to:
        parent_alias = aliased(PostClass)
        posts = posts.join(parent_alias, PostClass.parent)
        posts = posts.filter(parent_alias.creator_id == post_replies_to)
    # Post read/unread management
    is_unread = request.GET.get('is_unread')
    translations = None
    if user_id != Everyone:
        # This is horrible, but the join creates complex subqueries that
        # virtuoso cannot decode properly.
        read_posts = {
            v.post_id
            for v in discussion.db.query(ViewPost).filter(
                ViewPost.tombstone_condition(),
                ViewPost.actor_id == user_id,
                *ViewPost.get_discussion_conditions(discussion_id))
        }
        my_sentiments = {
            l.post_id: l
            for l in discussion.db.query(SentimentOfPost).filter(
                SentimentOfPost.tombstone_condition(),
                SentimentOfPost.actor_id == user_id,
                *SentimentOfPost.get_discussion_conditions(discussion_id))
        }
        if is_unread != None:
            posts = posts.outerjoin(
                ViewPost, and_(ViewPost.actor_id == user_id,
                               ViewPost.post_id == PostClass.id,
                               ViewPost.tombstone_date == None))
            if is_unread == "true":
                posts = posts.filter(ViewPost.id == None)
            elif is_unread == "false":
                posts = posts.filter(ViewPost.id != None)
        user = AgentProfile.get(user_id)
        service = discussion.translation_service()
        if service:
            translations = PrefCollectionTranslationTable(
                service, LanguagePreferenceCollection.getCurrent(request))
    else:
        # If there is no user_id, all posts are always unread
        my_sentiments = {}
        if is_unread == "false":
            raise HTTPBadRequest(
                localizer.translate(
                    _("You must be logged in to view which posts are read")))
    if text_search is not None:
        # another Virtuoso bug: offband kills score. but it helps speed.
        offband = () if (order == 'score') else None
        posts = posts.filter(
            Post.body_text_index.contains(text_search.encode('utf-8'),
                                          offband=offband))
    # posts = posts.options(contains_eager(Post.source))
    # Horrible hack... But useful for structure load
    if view_def == 'id_only':
        pass  # posts = posts.options(defer(Post.body))
    else:
        ideaContentLinkQuery = posts.with_entities(
            PostClass.id, PostClass.idea_content_links_above_post)
        ideaContentLinkCache = dict(ideaContentLinkQuery.all())
    # Note: we could count the like the same way and kill the subquery.
    # But it interferes with the popularity order,
    # and the benefit is not that high.
    sentiment_counts = discussion.db.query(
        PostClass.id, SentimentOfPost.type,
        count(SentimentOfPost.id)).join(SentimentOfPost).filter(
            PostClass.id.in_(posts.with_entities(PostClass.id).subquery()),
            SentimentOfPost.tombstone_condition()).group_by(
                PostClass.id, SentimentOfPost.type)
    sentiment_counts_by_post_id = defaultdict(dict)
    for (post_id, sentiment_type, sentiment_count) in sentiment_counts:
        # Strip the type prefix to key on the bare sentiment name.
        sentiment_counts_by_post_id[post_id][sentiment_type[
            SentimentOfPost.TYPE_PREFIX_LEN:]] = sentiment_count
    # Eager-load relations used during serialization below.
    posts = posts.options(
        # undefer(Post.idea_content_links_above_post),
        joinedload_all(Post.creator),
        joinedload_all(Post.extracts),
        joinedload_all(Post.widget_idea_links),
        joinedload_all(SynthesisPost.publishes_synthesis),
        subqueryload_all(Post.attachments))
    if len(discussion.discussion_locales) > 1:
        posts = posts.options(*Content.subqueryload_options())
    else:
        posts = posts.options(*Content.joinedload_options())
    if order == 'chronological':
        posts = posts.order_by(Content.creation_date)
    elif order == 'reverse_chronological':
        posts = posts.order_by(Content.creation_date.desc())
    elif order == 'score':
        posts = posts.order_by(Content.body_text_index.score_name.desc())
    elif order == 'popularity':
        # assume reverse chronological otherwise
        posts = posts.order_by(Content.disagree_count - Content.like_count,
                               Content.creation_date.desc())
    else:
        posts = posts.order_by(Content.id)
    # print str(posts)
    no_of_posts = 0
    no_of_posts_viewed_by_user = 0
    if deleted is True:
        # We just got deleted posts, now we want their ancestors for context
        post_ids = set()
        ancestor_ids = set()

        def add_ancestors(post):
            # Collect the post id and every id on its ancestry path.
            post_ids.add(post.id)
            ancestor_ids.update(
                [int(x) for x in post.ancestry.strip(",").split(",") if x])
        posts = list(posts)
        for post in posts:
            add_ancestors(post)
        ancestor_ids -= post_ids
        if ancestor_ids:
            ancestors = discussion.db.query(PostClass).filter(
                PostClass.id.in_(ancestor_ids))
            if view_def == 'id_only':
                pass  # ancestors = ancestors.options(defer(Post.body))
            else:
                ancestors = ancestors.options(
                    # undefer(Post.idea_content_links_above_post),
                    joinedload_all(Post.creator),
                    joinedload_all(Post.extracts),
                    joinedload_all(Post.widget_idea_links),
                    joinedload_all(SynthesisPost.publishes_synthesis),
                    subqueryload_all(Post.attachments))
                if len(discussion.discussion_locales) > 1:
                    ancestors = ancestors.options(
                        *Content.subqueryload_options())
                else:
                    ancestors = ancestors.options(
                        *Content.joinedload_options())
            posts.extend(ancestors.all())
    for query_result in posts:
        score, viewpost = None, None
        if not isinstance(query_result, (list, tuple)):
            # Without a score column the query yields bare Post objects.
            query_result = [query_result]
        post = query_result[0]
        if deleted is True:
            add_ancestors(post)
        if user_id != Everyone:
            viewpost = post.id in read_posts
            if view_def != "id_only":
                translate_content(post,
                                  translation_table=translations,
                                  service=service)
        no_of_posts += 1
        serializable_post = post.generic_json(view_def, user_id,
                                              permissions) or {}
        if order == 'score':
            score = query_result[1]
            serializable_post['score'] = score
        if viewpost:
            serializable_post['read'] = True
            no_of_posts_viewed_by_user += 1
        elif user_id != Everyone and root_post is not None and root_post.id == post.id:
            # Mark post read, we requested it explicitely
            viewed_post = ViewPost(actor_id=user_id, post=root_post)
            discussion.db.add(viewed_post)
            serializable_post['read'] = True
        else:
            serializable_post['read'] = False
        my_sentiment = my_sentiments.get(post.id, None)
        if my_sentiment is not None:
            my_sentiment = my_sentiment.generic_json('default', user_id,
                                                     permissions)
        serializable_post['my_sentiment'] = my_sentiment
        if view_def != "id_only":
            serializable_post['indirect_idea_content_links'] = (
                post.indirect_idea_content_links_with_cache(
                    ideaContentLinkCache.get(post.id, None)))
            serializable_post[
                'sentiment_counts'] = sentiment_counts_by_post_id[post.id]
        post_data.append(serializable_post)
    # Benoitg: For now, limiting the query by page completely garbles
    # threading without intelligent handling of pagination, so it stays
    # disabled; counts would need a separate query to remain correct.
    data = {}
    data["page"] = page
    data["unread"] = no_of_posts - no_of_posts_viewed_by_user
    data["total"] = no_of_posts
    data["maxPage"] = max(1, ceil(float(data["total"]) / page_size))
    # TODO: Check if we want 1 based index in the api
    data["startIndex"] = (page_size * page) - (page_size - 1)
    if data["page"] == data["maxPage"]:
        data["endIndex"] = data["total"]
    else:
        data["endIndex"] = data["startIndex"] + (page_size - 1)
    data["posts"] = post_data
    return data
def get_data(content):
    """Return uid, dict of fields we want to index, return None if we don't index.

    Builds the Elasticsearch-style document for one of four model kinds:
    Idea, AgentProfile, Post (incl. SynthesisPost), Extract.  Any other
    type falls through to ``(None, None)``, meaning "do not index".
    """
    from assembl.models import Idea, Post, SynthesisPost, AgentProfile, LangString, Extract
    if type(content) == Idea:  # only index Idea, not Thematic or Question
        data = {}
        for attr in ('creation_date', 'id', 'discussion_id'):
            data[attr] = getattr(content, attr)
        # Langstring props are flattened into per-locale fields of `data`.
        populate_from_langstring_prop(content, data, 'title')
        populate_from_langstring_prop(content, data, 'synthesis_title')
        populate_from_langstring_prop(content, data, 'description')
        announcement = content.get_applicable_announcement()
        if announcement:
            populate_from_langstring_prop(announcement, data, 'title',
                                          'announcement_title')
            populate_from_langstring_prop(announcement, data, 'body',
                                          'announcement_body')
        phase = content.get_associated_phase()
        if phase:
            data['phase_id'] = phase.id
            data['phase_identifier'] = phase.identifier
        return get_uid(content), data
    elif isinstance(content, AgentProfile):
        data = {}
        for attr in ('creation_date', 'id', 'name'):
            # Default None: AgentProfile doesn't have creation_date, User does.
            data[attr] = getattr(content, attr, None)
        # get all discussions that the user is in via AgentStatusInDiscussion
        data['discussion_id'] = set(
            [s.discussion_id for s in content.agent_status_in_discussion])
        # get discussion_id for all posts of this agent; union with the
        # status-based set, then convert to a list for serialization.
        data['discussion_id'] = list(data['discussion_id'].union(
            [post.discussion_id for post in content.posts_created]))
        return get_uid(content), data
    elif isinstance(content, Post):
        data = {}
        # _parent makes the post document a child of its creator's document.
        data['_parent'] = 'user:{}'.format(content.creator_id)
        if content.parent_id is not None:
            data['parent_creator_id'] = content.parent.creator_id
        for attr in ('discussion_id', 'creation_date', 'id', 'parent_id',
                     'creator_id', 'sentiment_counts'):
            data[attr] = getattr(content, attr)
        data['creator_display_name'] = AgentProfile.get(
            content.creator_id).display_name()
        # Names of sentiments with at least one vote.
        data['sentiment_tags'] = [
            key for key in data['sentiment_counts']
            if data['sentiment_counts'][key] > 0
        ]
        like = data['sentiment_counts']['like']
        disagree = data['sentiment_counts']['disagree']
        dont_understand = data['sentiment_counts']['dont_understand']
        more_info = data['sentiment_counts']['more_info']
        all_sentiments = [like, disagree, dont_understand, more_info]
        data['sentiment_counts']['total'] = sum(all_sentiments)
        data['sentiment_counts']['popularity'] = like - disagree
        # NOTE(review): under Python 2 these are integer divisions, so
        # consensus/controversy are truncated ratios — presumably intended
        # as coarse ranking signals; confirm before porting to Python 3.
        data['sentiment_counts']['consensus'] = max(all_sentiments) / (
            (sum(all_sentiments) / len(all_sentiments)) or 1)
        data['sentiment_counts']['controversy'] = max(like, disagree, 1) / min(
            like or 1, disagree or 1)
        data['type'] = content.type  # this is the subtype (assembl_post, email...)
        # data['publishes_synthesis_id'] = getattr(
        #     content, 'publishes_synthesis_id', None)
        phase = content.get_created_phase()
        if phase:
            data['phase_id'] = phase.id
            data['phase_identifier'] = phase.identifier
        if isinstance(content, SynthesisPost):
            populate_from_langstring_prop(content.publishes_synthesis,
                                          data, 'subject')
            populate_from_langstring_prop(content.publishes_synthesis,
                                          data, 'introduction')
            populate_from_langstring_prop(content.publishes_synthesis,
                                          data, 'conclusion')
            long_titles = [
                idea.synthesis_title
                for idea in content.publishes_synthesis.ideas
                if idea.synthesis_title
            ]
            # Group all synthesis idea titles by locale (country stripped),
            # then join them into one LangString value per locale.
            long_titles_c = defaultdict(list)
            for ls in long_titles:
                for e in ls.entries:
                    if e.value:
                        long_titles_c[strip_country(e.base_locale)].append(
                            e.value)
            ls = LangString()
            for locale, values in long_titles_c.iteritems():
                ls.add_value(' '.join(values), locale)
            populate_from_langstring(ls, data, 'ideas')
        else:
            idea_id = get_idea_id_for_post(content)
            if not idea_id:
                # Posts not attached to an idea are not indexed.
                return None, None
            data['idea_id'] = idea_id
            populate_from_langstring_prop(content, data, 'body')
            populate_from_langstring_prop(content, data, 'subject')
        return get_uid(content), data
    elif isinstance(content, Extract):
        data = {}
        for attr in ('discussion_id', 'body', 'creation_date', 'id',
                     'creator_id'):
            data[attr] = getattr(content, attr)
        data['post_id'] = content.content_id
        post = Post.get(content.content_id)
        populate_from_langstring_prop(post, data, 'subject')
        phase = post.get_created_phase()
        if phase:
            data['phase_id'] = phase.id
            data['phase_identifier'] = phase.identifier
        idea_id = get_idea_id_for_post(post)
        if not idea_id:
            return None, None
        data['idea_id'] = idea_id
        # Taxonomy values are namespaced for the search UI.
        data['extract_state'] = 'taxonomy_state.' + content.extract_state
        if content.extract_nature:
            data['extract_nature'] = 'taxonomy_nature.' + content.extract_nature.name
        if content.extract_action:
            data['extract_action'] = 'taxonomy_action.' + content.extract_action.name
        data['creator_display_name'] = AgentProfile.get(
            content.creator_id).display_name()
        return get_uid(content), data
    return None, None
def get_posts(request):
    """
    Query interface on posts
    Filters have two forms:
    only_*, is for filters that cannot be reversed (ex: only_synthesis, only_orphan)
    is_*, is for filters that can be reversed (ex:is_unread=true returns only unread
    message, is_unread=false returns only read messages)
    order: can be chronological, reverse_chronological, popularity
    root_post_id: all posts below the one specified.
    family_post_id: all posts below the one specified, and all its ancestors.
    post_reply_to: replies to a given post
    root_idea_id: all posts associated with the given idea
    ids: explicit message ids.
    posted_after_date, posted_before_date: date selection (ISO format)
    post_author: filter by author
    keyword: use full-text search
    locale: restrict to locale
    """
    localizer = request.localizer
    discussion = request.context
    discussion.import_from_sources()
    user_id = authenticated_userid(request) or Everyone
    permissions = request.permissions
    DEFAULT_PAGE_SIZE = 25
    page_size = DEFAULT_PAGE_SIZE
    # NOTE(review): filter_names is parsed but never used below; kept for parity.
    filter_names = [
        filter_name for filter_name in request.GET.getone('filters').split(',')
        if filter_name
    ] if request.GET.get('filters') else []
    try:
        page = int(request.GET.getone('page'))
    except (ValueError, KeyError):
        page = 1
    keywords = request.GET.getall('keyword')
    order = request.GET.get('order')
    if order is None:
        order = 'chronological'
    assert order in ('chronological', 'reverse_chronological', 'score',
                     'popularity')
    if order == 'score' and not keywords:
        raise HTTPBadRequest("Cannot ask for a score without keywords")
    if page < 1:
        page = 1
    root_post_id = request.GET.getall('root_post_id')
    if root_post_id:
        root_post_id = Post.get_database_id(root_post_id[0])
    family_post_id = request.GET.getall('family_post_id')
    if family_post_id:
        family_post_id = Post.get_database_id(family_post_id[0])
    root_idea_id = request.GET.getall('root_idea_id')
    if root_idea_id:
        root_idea_id = Idea.get_database_id(root_idea_id[0])
    ids = request.GET.getall('ids[]')
    if ids:
        ids = [Post.get_database_id(id) for id in ids]
    view_def = request.GET.get('view') or 'default'
    only_synthesis = request.GET.get('only_synthesis')
    post_author_id = request.GET.get('post_author')
    if post_author_id:
        post_author_id = AgentProfile.get_database_id(post_author_id)
        # Fix: post_author_id is an int here; str() avoids a TypeError in
        # the assertion message (str + int) masking the AssertionError.
        assert AgentProfile.get(post_author_id), \
            "Unable to find agent profile with id " + str(post_author_id)
    post_replies_to = request.GET.get('post_replies_to')
    if post_replies_to:
        post_replies_to = AgentProfile.get_database_id(post_replies_to)
        assert AgentProfile.get(post_replies_to), \
            "Unable to find agent profile with id " + str(post_replies_to)
    posted_after_date = request.GET.get('posted_after_date')
    posted_before_date = request.GET.get('posted_before_date')
    PostClass = SynthesisPost if only_synthesis == "true" else Post
    posts = discussion.db.query(PostClass)
    posts = posts.filter(PostClass.discussion == discussion,)
    ##no_of_posts_to_discussion = posts.count()
    post_data = []
    # True means deleted only, False (default) means non-deleted only. None means both.
    deleted = request.GET.get('deleted', None)
    if deleted is None:
        if not ids:
            deleted = False
        else:
            deleted = None
    elif deleted.lower() == "any":
        deleted = None
    else:
        deleted = asbool(deleted)
    only_orphan = asbool(request.GET.get('only_orphan', False))
    if only_orphan:
        if root_idea_id:
            raise HTTPBadRequest(localizer.translate(
                _("Getting orphan posts of a specific idea isn't supported.")))
        orphans = Idea._get_orphan_posts_statement(
            discussion.id, True, include_deleted=deleted).subquery("orphans")
        posts = posts.join(orphans, PostClass.id == orphans.c.post_id)
    if root_idea_id:
        related = Idea.get_related_posts_query_c(
            discussion.id, root_idea_id, True, include_deleted=deleted)
        posts = posts.join(related, PostClass.id == related.c.post_id)
    elif not only_orphan:
        if deleted is not None:
            if deleted:
                posts = posts.filter(
                    PostClass.publication_state.in_(
                        deleted_publication_states))
            else:
                posts = posts.filter(PostClass.tombstone_date == None)
    if root_post_id:
        root_post = Post.get(root_post_id)
        posts = posts.filter(
            (Post.ancestry.like(root_post.ancestry +
                                cast(root_post.id, String) + ',%')) |
            (PostClass.id == root_post.id))
    elif family_post_id:
        root_post = Post.get(family_post_id)
        ancestor_ids = root_post.ancestor_ids()
        posts = posts.filter(
            (Post.ancestry.like(root_post.ancestry +
                                cast(root_post.id, String) + ',%')) |
            (PostClass.id == root_post.id) |
            (PostClass.id.in_(ancestor_ids)))
    else:
        root_post = None
    if ids:
        posts = posts.filter(Post.id.in_(ids))
    if posted_after_date:
        posted_after_date = parse_datetime(posted_after_date)
        if posted_after_date:
            posts = posts.filter(PostClass.creation_date >= posted_after_date)
        #Maybe we should do something if the date is invalid. benoitg
    if posted_before_date:
        posted_before_date = parse_datetime(posted_before_date)
        if posted_before_date:
            posts = posts.filter(PostClass.creation_date <= posted_before_date)
        #Maybe we should do something if the date is invalid. benoitg
    if post_author_id:
        posts = posts.filter(PostClass.creator_id == post_author_id)
    if post_replies_to:
        parent_alias = aliased(PostClass)
        posts = posts.join(parent_alias, PostClass.parent)
        posts = posts.filter(parent_alias.creator_id == post_replies_to)
    if keywords:
        locales = request.GET.getall('locale')
        posts, rank = add_text_search(
            posts, (PostClass.body_id,), keywords, locales, order == 'score')
    # Post read/unread management
    is_unread = request.GET.get('is_unread')
    translations = None
    if user_id != Everyone:
        # This is horrible, but the join creates complex subqueries that
        # virtuoso cannot decode properly.
        read_posts = {v.post_id for v in discussion.db.query(ViewPost).filter(
            ViewPost.tombstone_condition(),
            ViewPost.actor_id == user_id,
            *ViewPost.get_discussion_conditions(discussion.id))}
        liked_posts = {l.post_id: l.id
                       for l in discussion.db.query(LikedPost).filter(
                           LikedPost.tombstone_condition(),
                           LikedPost.actor_id == user_id,
                           *LikedPost.get_discussion_conditions(discussion.id))}
        if is_unread is not None:
            posts = posts.outerjoin(
                ViewPost, and_(
                    ViewPost.actor_id == user_id,
                    ViewPost.post_id == PostClass.id,
                    ViewPost.tombstone_date == None))
            if is_unread == "true":
                posts = posts.filter(ViewPost.id == None)
            elif is_unread == "false":
                posts = posts.filter(ViewPost.id != None)
        # NOTE(review): `user` is never used afterwards; kept for parity.
        user = AgentProfile.get(user_id)
        service = discussion.translation_service()
        if service.canTranslate is not None:
            translations = PrefCollectionTranslationTable(
                service, LanguagePreferenceCollection.getCurrent(request))
    else:
        #If there is no user_id, all posts are always unread
        if is_unread == "false":
            raise HTTPBadRequest(localizer.translate(
                _("You must be logged in to view which posts are read")))
    # posts = posts.options(contains_eager(Post.source))
    # Horrible hack... But useful for structure load
    if view_def in ('partial_post', 'id_only'):
        pass  # posts = posts.options(defer(Post.body))
    else:
        ideaContentLinkQuery = posts.with_entities(
            PostClass.id, PostClass.idea_content_links_above_post)
        ideaContentLinkCache = dict(ideaContentLinkQuery.all())
        posts = posts.options(
            # undefer(Post.idea_content_links_above_post),
            joinedload_all(Post.creator),
            joinedload_all(Post.extracts),
            joinedload_all(Post.widget_idea_links),
            joinedload_all(SynthesisPost.publishes_synthesis),
            subqueryload_all(Post.attachments))
        if len(discussion.discussion_locales) > 1:
            posts = posts.options(*Content.subqueryload_options())
        else:
            posts = posts.options(*Content.joinedload_options())
    if order == 'chronological':
        posts = posts.order_by(Content.creation_date)
    elif order == 'reverse_chronological':
        posts = posts.order_by(Content.creation_date.desc())
    elif order == 'score':
        posts = posts.order_by(rank.desc())
    elif order == 'popularity':
        posts = posts.order_by(Content.like_count.desc(),
                               Content.creation_date.desc())
    else:
        # assume reverse chronological otherwise
        posts = posts.order_by(Content.id)
    # print str(posts)
    no_of_posts = 0
    no_of_posts_viewed_by_user = 0
    if deleted is True:
        # We just got deleted posts, now we want their ancestors for context
        post_ids = set()
        ancestor_ids = set()

        def add_ancestors(post):
            post_ids.add(post.id)
            ancestor_ids.update(
                [int(x) for x in post.ancestry.strip(",").split(",") if x])
        posts = list(posts)
        for post in posts:
            add_ancestors(post)
        ancestor_ids -= post_ids
        if ancestor_ids:
            ancestors = discussion.db.query(PostClass).filter(
                PostClass.id.in_(ancestor_ids))
            if view_def in ('partial_post', 'id_only'):
                pass  # ancestors = ancestors.options(defer(Post.body))
            else:
                ancestors = ancestors.options(
                    # undefer(Post.idea_content_links_above_post),
                    joinedload_all(Post.creator),
                    joinedload_all(Post.extracts),
                    joinedload_all(Post.widget_idea_links),
                    joinedload_all(SynthesisPost.publishes_synthesis),
                    subqueryload_all(Post.attachments))
                if len(discussion.discussion_locales) > 1:
                    ancestors = ancestors.options(
                        *Content.subqueryload_options())
                else:
                    ancestors = ancestors.options(
                        *Content.joinedload_options())
            posts.extend(ancestors.all())
    if view_def == 'id_only':
        posts = posts.with_entities(PostClass.id)
    for query_result in posts:
        score, viewpost, likedpost = None, None, None
        if not isinstance(query_result, (list, tuple)):
            query_result = [query_result]
        post = query_result[0]
        no_of_posts += 1
        if view_def == 'id_only':
            post_data.append(Content.uri_generic(post))
            continue
        if deleted is True:
            add_ancestors(post)
        if user_id != Everyone:
            viewpost = post.id in read_posts
            likedpost = liked_posts.get(post.id, None)
            if view_def not in ("partial_post", "id_only"):
                translate_content(
                    post, translation_table=translations, service=service)
        serializable_post = post.generic_json(
            view_def, user_id, permissions) or {}
        if order == 'score':
            score = query_result[1]
            serializable_post['score'] = score
        if viewpost:
            serializable_post['read'] = True
            no_of_posts_viewed_by_user += 1
        elif user_id != Everyone and root_post is not None \
                and root_post.id == post.id:
            # Mark post read, we requested it explicitely
            viewed_post = ViewPost(actor_id=user_id, post=root_post)
            discussion.db.add(viewed_post)
            serializable_post['read'] = True
        else:
            serializable_post['read'] = False
        # serializable_post['liked'] = likedpost.uri() if likedpost else False
        serializable_post['liked'] = (
            LikedPost.uri_generic(likedpost) if likedpost else False)
        if view_def not in ("partial_post", "id_only"):
            serializable_post['indirect_idea_content_links'] = (
                post.indirect_idea_content_links_with_cache(
                    ideaContentLinkCache.get(post.id, None)))
        post_data.append(serializable_post)
    # Benoitg: For now, this completely garbles threading without intelligent
    # handling of pagination. Disabling
    #posts = posts.limit(page_size).offset(data['startIndex']-1)
    data = {}
    data["page"] = page
    data["unread"] = no_of_posts - no_of_posts_viewed_by_user
    data["total"] = no_of_posts
    # Fix: force float division; under Python 2, ceil(int/int) truncated and
    # produced a wrong maxPage (the other variants of this view use float()).
    data["maxPage"] = max(1, ceil(float(data["total"]) / page_size))
    #TODO: Check if we want 1 based index in the api
    data["startIndex"] = (page_size * page) - (page_size - 1)
    if data["page"] == data["maxPage"]:
        data["endIndex"] = data["total"]
    else:
        data["endIndex"] = data["startIndex"] + (page_size - 1)
    data["posts"] = post_data
    return data
def get_posts(request):
    """
    Query interface on posts
    Filters have two forms:
    only_*, is for filters that cannot be reversed (ex: only_synthesis)
    is_*, is for filters that can be reversed (ex:is_unread=true returns only unread
    message, is_unread=false returns only read messages)
    order can be chronological, reverse_chronological
    """
    localizer = request.localizer
    discussion_id = int(request.matchdict['discussion_id'])
    discussion = Discussion.get(int(discussion_id))
    if not discussion:
        raise HTTPNotFound(localizer.translate(
            _("No discussion found with id=%s")) % discussion_id)
    discussion.import_from_sources()
    user_id = authenticated_userid(request) or Everyone
    permissions = get_permissions(user_id, discussion_id)
    DEFAULT_PAGE_SIZE = 25
    page_size = DEFAULT_PAGE_SIZE
    # NOTE(review): filter_names is parsed but never used below; kept for parity.
    filter_names = [
        filter_name for filter_name
        in request.GET.getone('filters').split(',')
        if filter_name
    ] if request.GET.get('filters') else []
    try:
        page = int(request.GET.getone('page'))
    except (ValueError, KeyError):
        page = 1
    text_search = request.GET.get('text_search', None)
    order = request.GET.get('order')
    if order is None:
        order = 'chronological'
    assert order in ('chronological', 'reverse_chronological', 'score')
    if order == 'score':
        assert text_search is not None
    if page < 1:
        page = 1
    root_post_id = request.GET.getall('root_post_id')
    if root_post_id:
        root_post_id = get_database_id("Post", root_post_id[0])
    family_post_id = request.GET.getall('family_post_id')
    if family_post_id:
        family_post_id = get_database_id("Post", family_post_id[0])
    root_idea_id = request.GET.getall('root_idea_id')
    if root_idea_id:
        root_idea_id = get_database_id("Idea", root_idea_id[0])
    ids = request.GET.getall('ids[]')
    if ids:
        ids = [get_database_id("Post", id) for id in ids]
    view_def = request.GET.get('view') or 'default'
    only_synthesis = request.GET.get('only_synthesis')
    post_author_id = request.GET.get('post_author')
    if post_author_id:
        post_author_id = get_database_id("AgentProfile", post_author_id)
        # Fix: str() — the id is an int here, and "str" + int raised a
        # TypeError instead of showing the assertion message.
        assert AgentProfile.get(post_author_id), \
            "Unable to find agent profile with id " + str(post_author_id)
    post_replies_to = request.GET.get('post_replies_to')
    if post_replies_to:
        post_replies_to = get_database_id("AgentProfile", post_replies_to)
        assert AgentProfile.get(post_replies_to), \
            "Unable to find agent profile with id " + str(post_replies_to)
    posted_after_date = request.GET.get('posted_after_date')
    posted_before_date = request.GET.get('posted_before_date')
    PostClass = SynthesisPost if only_synthesis == "true" else Post
    if order == 'score':
        posts = discussion.db.query(
            PostClass, Content.body_text_index.score_name)
    else:
        posts = discussion.db.query(PostClass)
    posts = posts.filter(PostClass.discussion_id == discussion_id,)
    ##no_of_posts_to_discussion = posts.count()
    post_data = []
    only_orphan = request.GET.get('only_orphan')
    if only_orphan == "true":
        if root_idea_id:
            raise HTTPBadRequest(localizer.translate(
                _("Getting orphan posts of a specific idea isn't supported.")))
        orphans = text(Idea._get_orphan_posts_statement(),
                       bindparams=[bindparam('discussion_id', discussion_id)]
                       ).columns(column('post_id')).alias('orphans')
        posts = posts.join(orphans, PostClass.id == orphans.c.post_id)
    elif only_orphan == "false":
        raise HTTPBadRequest(localizer.translate(
            _("Getting non-orphan posts isn't supported.")))
    # "true" means hidden only, "false" (default) means visible only. "any" means both.
    hidden = request.GET.get('hidden_messages', "false")
    if hidden != 'any':
        posts = posts.filter(PostClass.hidden == asbool(hidden))
    if root_idea_id:
        related = text(Idea._get_related_posts_statement(),
                       bindparams=[bindparam('root_idea_id', root_idea_id),
                                   bindparam('discussion_id', discussion_id)]
                       ).columns(column('post_id')).alias('related')
        #Virtuoso bug: This should work...
        #posts = posts.join(related, PostClass.id==related.c.post_id)
        posts = posts.join(related, PostClass.id == related.c.post_id)
    if root_post_id:
        root_post = Post.get(root_post_id)
        posts = posts.filter(
            (Post.ancestry.like(root_post.ancestry +
                                cast(root_post.id, String) + ',%')) |
            (PostClass.id == root_post.id))
    elif family_post_id:
        root_post = Post.get(family_post_id)
        ancestor_ids = root_post.ancestor_ids()
        posts = posts.filter(
            (Post.ancestry.like(root_post.ancestry +
                                cast(root_post.id, String) + ',%')) |
            (PostClass.id == root_post.id) |
            (PostClass.id.in_(ancestor_ids)))
    else:
        root_post = None
    if ids:
        posts = posts.filter(Post.id.in_(ids))
    if posted_after_date:
        posted_after_date = parse_datetime(posted_after_date)
        if posted_after_date:
            posts = posts.filter(PostClass.creation_date >= posted_after_date)
        #Maybe we should do something if the date is invalid. benoitg
    if posted_before_date:
        posted_before_date = parse_datetime(posted_before_date)
        if posted_before_date:
            posts = posts.filter(PostClass.creation_date <= posted_before_date)
        #Maybe we should do something if the date is invalid. benoitg
    if post_author_id:
        posts = posts.filter(PostClass.creator_id == post_author_id)
    if post_replies_to:
        parent_alias = aliased(PostClass)
        posts = posts.join(parent_alias, PostClass.parent)
        posts = posts.filter(parent_alias.creator_id == post_replies_to)
    # Post read/unread management
    is_unread = request.GET.get('is_unread')
    if user_id != Everyone:
        # This is horrible, but the join creates complex subqueries that
        # virtuoso cannot decode properly.
        read_posts = {v.post_id for v in discussion.db.query(ViewPost).filter(
            ViewPost.tombstone_condition(),
            ViewPost.actor_id == user_id,
            *ViewPost.get_discussion_conditions(discussion_id))}
        liked_posts = {l.post_id: l.id
                       for l in discussion.db.query(LikedPost).filter(
                           LikedPost.tombstone_condition(),
                           LikedPost.actor_id == user_id,
                           *LikedPost.get_discussion_conditions(discussion_id))}
        if is_unread is not None:
            posts = posts.outerjoin(
                ViewPost, and_(
                    ViewPost.actor_id == user_id,
                    ViewPost.post_id == PostClass.id,
                    ViewPost.tombstone_date == None))
            if is_unread == "true":
                posts = posts.filter(ViewPost.id == None)
            elif is_unread == "false":
                posts = posts.filter(ViewPost.id != None)
    else:
        #If there is no user_id, all posts are always unread
        if is_unread == "false":
            raise HTTPBadRequest(localizer.translate(
                _("You must be logged in to view which posts are read")))
    if text_search is not None:
        # another Virtuoso bug: offband kills score. but it helps speed.
        offband = () if (order == 'score') else None
        posts = posts.filter(Post.body_text_index.contains(
            text_search.encode('utf-8'), offband=offband))
    #posts = posts.options(contains_eager(Post.source))
    # Horrible hack... But useful for structure load
    if view_def == 'id_only':
        pass  # posts = posts.options(defer(Post.body))
    else:
        posts = posts.options(joinedload_all(Post.creator))
        posts = posts.options(joinedload_all(Post.extracts))
        posts = posts.options(joinedload_all(Post.widget_idea_links))
        posts = posts.options(joinedload_all(
            SynthesisPost.publishes_synthesis))
    if order == 'chronological':
        posts = posts.order_by(Content.creation_date)
    elif order == 'reverse_chronological':
        posts = posts.order_by(Content.creation_date.desc())
    elif order == 'score':
        posts = posts.order_by(Content.body_text_index.score_name.desc())
    # Fix: leftover debug statement removed (the newer variant of this view
    # keeps it commented out as well):
    # print str(posts)
    no_of_posts = 0
    no_of_posts_viewed_by_user = 0
    for query_result in posts:
        score, viewpost, likedpost = None, None, None
        if not isinstance(query_result, (list, tuple)):
            query_result = [query_result]
        post = query_result[0]
        if user_id != Everyone:
            viewpost = post.id in read_posts
            likedpost = liked_posts.get(post.id, None)
        no_of_posts += 1
        serializable_post = post.generic_json(
            view_def, user_id, permissions) or {}
        if order == 'score':
            score = query_result[1]
            serializable_post['score'] = score
        if viewpost:
            serializable_post['read'] = True
            no_of_posts_viewed_by_user += 1
        elif user_id != Everyone and root_post is not None \
                and root_post.id == post.id:
            # Mark post read, we requested it explicitely
            viewed_post = ViewPost(actor_id=user_id, post=root_post)
            discussion.db.add(viewed_post)
            serializable_post['read'] = True
        else:
            serializable_post['read'] = False
        # serializable_post['liked'] = likedpost.uri() if likedpost else False
        serializable_post['liked'] = LikedPost.uri_generic(
            likedpost) if likedpost else False
        post_data.append(serializable_post)
    # Benoitg: For now, this completely garbles threading without intelligent
    # handling of pagination. Disabling
    #posts = posts.limit(page_size).offset(data['startIndex']-1)
    data = {}
    data["page"] = page
    data["unread"] = no_of_posts - no_of_posts_viewed_by_user
    data["total"] = no_of_posts
    data["maxPage"] = max(1, ceil(float(data["total"]) / page_size))
    #TODO: Check if we want 1 based index in the api
    data["startIndex"] = (page_size * page) - (page_size - 1)
    if data["page"] == data["maxPage"]:
        data["endIndex"] = data["total"]
    else:
        data["endIndex"] = data["startIndex"] + (page_size - 1)
    data["posts"] = post_data
    return data
def home_view(request):
    """Render the discussion home page (index.jinja2).

    Enforces read permission (redirecting anonymous users to the contextual
    login), resolves a requested post/idea for meta tags, records the visit
    and the user's locale preference, and disables response caching.
    """
    user_id = authenticated_userid(request) or Everyone
    context = get_default_context(request)
    discussion = context["discussion"]
    request.session["discussion"] = discussion.slug
    canRead = user_has_permission(discussion.id, user_id, P_READ)
    if not canRead and user_id == Everyone:
        # User isn't logged-in and discussion isn't public:
        # redirect to login page
        login_url = request.route_url('contextual_login',
                                      discussion_slug=discussion.slug)
        return HTTPSeeOther(login_url)
    elif not canRead:
        # User is logged-in but doesn't have access to the discussion
        return HTTPUnauthorized()
    # if the route asks for a post, get post content (because this is needed for meta tags)
    route_name = request.matched_route.name
    if route_name == "purl_posts":
        post_id = FrontendUrls.getRequestedPostId(request)
        if not post_id:
            return HTTPSeeOther(request.route_url(
                'home', discussion_slug=discussion.slug))
        post = Post.get_instance(post_id)
        # Fall back to home when the post is missing or from another discussion.
        if not post or post.discussion_id != discussion.id:
            return HTTPSeeOther(request.route_url(
                'home', discussion_slug=discussion.slug))
        context['post'] = post
    elif route_name == "purl_idea":
        idea_id = FrontendUrls.getRequestedIdeaId(request)
        if not idea_id:
            return HTTPSeeOther(request.route_url(
                'home', discussion_slug=discussion.slug))
        idea = Idea.get_instance(idea_id)
        if not idea or idea.discussion_id != discussion.id:
            return HTTPSeeOther(request.route_url(
                'home', discussion_slug=discussion.slug))
        context['idea'] = idea
    canAddExtract = user_has_permission(discussion.id, user_id, P_ADD_EXTRACT)
    context['canAddExtract'] = canAddExtract
    context['canDisplayTabs'] = True
    if user_id != Everyone:
        from assembl.models import AgentProfile
        user = AgentProfile.get(user_id)
        # TODO: user may not exist. Case of session with BD change.
        user.is_visiting_discussion(discussion.id)
        session = Discussion.default_db
        current_prefs = session.query(UserLanguagePreference).\
            filter_by(user_id=user_id).all()
        user = session.query(User).filter_by(id=user_id).first()

        def validate_locale(l):
            # Fix: use the parameter instead of closing over the enclosing
            # `locale` variable (the original ignored its argument and only
            # worked because every caller passed that same variable).
            return ensure_locale_has_country(to_posix_format(l))

        if '_LOCALE_' in request.cookies:
            locale = request.cookies['_LOCALE_']
            posix_locale = validate_locale(locale)
            process_locale(posix_locale, user_id, current_prefs, session,
                           LanguagePreferenceOrder.Cookie)
        elif '_LOCALE_' in request.params:
            locale = request.params['_LOCALE_']
            posix_locale = validate_locale(locale)
            process_locale(posix_locale, user_id, current_prefs, session,
                           LanguagePreferenceOrder.Parameter)
        else:
            locale = default_locale_negotiator(request)
            posix_locale = validate_locale(locale)
            process_locale(posix_locale, user_id, current_prefs, session,
                           LanguagePreferenceOrder.OS_Default)
    response = render_to_response('../../templates/index.jinja2', context,
                                  request=request)
    # Prevent caching the home, especially for proper login/logout
    response.cache_control.max_age = 0
    response.cache_control.prevent_auto = True
    return response
def test_count_posts_in_discussion_all_profiles(test_app, discussion,
                                                admin_user, proposals):
    """The admin's per-profile post count matches the proposal fixtures."""
    from assembl.models import AgentProfile
    counts_by_profile = AgentProfile.count_posts_in_discussion_all_profiles(
        discussion)
    assert counts_by_profile[admin_user.id] == 15
def home_view(request):
    """Render the discussion home page (index.jinja2).

    Checks read permission (redirecting anonymous users to the contextual
    login), resolves a requested post/idea for the context, records the
    visit and the visitor's locale preference, and disables caching.
    """
    user_id = authenticated_userid(request) or Everyone
    context = get_default_context(request)
    discussion = context["discussion"]
    canRead = user_has_permission(discussion.id, user_id, P_READ)
    if not canRead and user_id == Everyone:
        # User isn't logged-in and discussion isn't public:
        # redirect to login page
        login_url = request.route_url(
            'contextual_login', discussion_slug=discussion.slug)
        return HTTPSeeOther(login_url)
    elif not canRead:
        # User is logged-in but doesn't have access to the discussion
        return HTTPUnauthorized()
    # if the route asks for a post, get post content (because this is needed for meta tags)
    route_name = request.matched_route.name
    if route_name == "purl_posts":
        post_id = FrontendUrls.getRequestedPostId(request)
        if post_id:
            post = Post.get_instance(post_id)
            # Only expose the post when it belongs to this discussion.
            if post and post.discussion_id == discussion.id:
                context['post'] = post
    elif route_name == "purl_idea":
        idea_id = FrontendUrls.getRequestedIdeaId(request)
        if idea_id:
            idea = Idea.get_instance(idea_id)
            if idea and idea.discussion_id == discussion.id:
                context['idea'] = idea
    canAddExtract = user_has_permission(discussion.id, user_id, P_ADD_EXTRACT)
    context['canAddExtract'] = canAddExtract
    context['canDisplayTabs'] = True
    if user_id != Everyone:
        from assembl.models import AgentProfile
        user = AgentProfile.get(user_id)
        # TODO: user may not exist. Case of session with BD change.
        user.is_visiting_discussion(discussion.id)
        # NOTE(review): locale-preference handling assumed to apply only to
        # logged-in users (the queries below are keyed on user_id) — confirm.
        session = Discussion.db()
        current_prefs = session.query(UserLanguagePreference).\
            filter_by(user_id=user_id).all()
        user = session.query(User).filter_by(id=user_id).first()
        # Preference source priority: cookie, then query parameter, then the
        # OS/browser default negotiated by Pyramid.
        if '_LOCALE_' in request.cookies:
            locale = request.cookies['_LOCALE_']
            posix_locale = to_posix_format(locale)
            process_locale(posix_locale, user_id, current_prefs, session,
                           LanguagePreferenceOrder.Cookie)
        elif '_LOCALE_' in request.params:
            locale = request.params['_LOCALE_']
            posix_locale = to_posix_format(locale)
            process_locale(posix_locale, user_id, current_prefs, session,
                           LanguagePreferenceOrder.Parameter)
        else:
            locale = default_locale_negotiator(request)
            posix_locale = to_posix_format(locale)
            process_locale(posix_locale, user_id, current_prefs, session,
                           LanguagePreferenceOrder.OS_Default)
    response = render_to_response('../../templates/index.jinja2', context,
                                  request=request)
    # Prevent caching the home, especially for proper login/logout
    response.cache_control.max_age = 0
    response.cache_control.prevent_auto = True
    return response
def reset_password(request):
    """Send a password-reset email for the user named by user_id or identifier.

    Lookup failures still answer with an HTTPOk redirect to the
    'password_change_sent' route (so the endpoint does not reveal which
    accounts exist); the failure is only logged server-side.  Raises
    JSONError when neither user_id nor identifier is given.
    """
    identifier = request.json_body.get('identifier')
    user_id = request.json_body.get('user_id')
    slug = request.json_body.get('discussion_slug')
    logger = logging.getLogger()
    discussion = None
    if slug:
        discussion = Discussion.default_db.query(Discussion).filter_by(
            slug=slug).first()
    email = None
    user = None
    localizer = request.localizer
    # Fix: identifier may be None (absent from the JSON body); the original
    # evaluated `'@' in identifier` unconditionally and crashed with a
    # TypeError while building the redirect query.
    email_query = dict(
        email=identifier if identifier and '@' in identifier else '')
    if user_id:
        user = AgentProfile.get(int(user_id))
        if not user:
            logger.error(
                "[Password reset] The user with the identifier %s does not exist"
                % (identifier))
            return HTTPOk(location=maybe_contextual_route(
                request, 'password_change_sent', profile_id=user_id,
                _query=email_query))
        if identifier:
            # Prefer the given identifier when it matches one of the
            # user's account emails.
            for account in user.accounts:
                if identifier == account.email:
                    email = identifier
                    break
    elif identifier:
        user, account = from_identifier(identifier)
        if not user:
            logger.error(
                "[Password reset] The user with the identifier %s does not exist"
                % (identifier))
            return HTTPOk(location=maybe_contextual_route(
                request, 'password_change_sent', profile_id=user_id,
                _query=email_query))
        if account:
            email = account.email
    else:
        error = localizer.translate(_("Please give an identifier"))
        raise JSONError(error)
    if not email:
        if user:
            email = user.get_preferred_email()
        else:
            email = None
    if not email:
        logger.error(
            "[Password reset] The user with the identifier %s does not exist"
            % (user.id))
        return HTTPOk(location=maybe_contextual_route(
            request, 'password_change_sent', profile_id=user_id,
            _query=email_query))
    if not isinstance(user, User):
        logger.error("This is not a user.")
        return HTTPOk(location=maybe_contextual_route(
            request, 'password_change_sent', profile_id=user_id,
            _query=email_query))
    send_change_password_email(request, user, email, discussion=discussion)
    return HTTPOk()
def get_posts(request):
    """ Query interface on posts
    Filters have two forms:
    only_*, is for filters that cannot be reversed (ex: only_synthesis)
    is_*, is for filters that can be reversed (ex:is_unread=true returns only unread
    order can be chronological, reverse_chronological
    message, is_unread=false returns only read messages)
    """
    localizer = request.localizer
    discussion_id = int(request.matchdict['discussion_id'])
    discussion = Discussion.get(int(discussion_id))
    if not discussion:
        raise HTTPNotFound(localizer.translate(
            _("No discussion found with id=%s")) % discussion_id)

    discussion.import_from_sources()

    user_id = authenticated_userid(request) or Everyone
    permissions = get_permissions(user_id, discussion_id)

    DEFAULT_PAGE_SIZE = 25
    page_size = DEFAULT_PAGE_SIZE

    # NOTE(review): parsed but never used below; kept for its (benign)
    # validation side effect on the 'filters' parameter.
    filter_names = [
        filter_name for filter_name
        in request.GET.getone('filters').split(',')
        if filter_name
    ] if request.GET.get('filters') else []

    try:
        page = int(request.GET.getone('page'))
    except (ValueError, KeyError):
        page = 1

    text_search = request.GET.get('text_search', None)

    order = request.GET.get('order')
    if order is None:
        order = 'chronological'
    assert order in ('chronological', 'reverse_chronological', 'score')
    if order == 'score':
        # Score ordering only makes sense relative to a text search.
        assert text_search is not None

    if page < 1:
        page = 1

    root_post_id = request.GET.getall('root_post_id')
    if root_post_id:
        root_post_id = get_database_id("Post", root_post_id[0])
    family_post_id = request.GET.getall('family_post_id')
    if family_post_id:
        family_post_id = get_database_id("Post", family_post_id[0])

    root_idea_id = request.GET.getall('root_idea_id')
    if root_idea_id:
        root_idea_id = get_database_id("Idea", root_idea_id[0])

    ids = request.GET.getall('ids[]')
    if ids:
        ids = [get_database_id("Post", post_id) for post_id in ids]

    view_def = request.GET.get('view') or 'default'

    only_synthesis = request.GET.get('only_synthesis')

    post_author_id = request.GET.get('post_author')
    if post_author_id:
        post_author_id = get_database_id("AgentProfile", post_author_id)
        assert AgentProfile.get(post_author_id), \
            "Unable to find agent profile with id " + post_author_id

    post_replies_to = request.GET.get('post_replies_to')
    if post_replies_to:
        post_replies_to = get_database_id("AgentProfile", post_replies_to)
        assert AgentProfile.get(post_replies_to), \
            "Unable to find agent profile with id " + post_replies_to

    posted_after_date = request.GET.get('posted_after_date')
    posted_before_date = request.GET.get('posted_before_date')

    PostClass = SynthesisPost if only_synthesis == "true" else Post
    # Parallel query: (post_id -> idea content links) cache, filtered the
    # same way as the main posts query below.
    ideaContentLinkQuery = discussion.db.query(
        PostClass.id, PostClass.idea_content_links_above_post)
    if order == 'score':
        posts = discussion.db.query(
            PostClass, Content.body_text_index.score_name)
    else:
        posts = discussion.db.query(PostClass)

    posts = posts.filter(
        PostClass.discussion_id == discussion_id,
    )
    ideaContentLinkQuery = ideaContentLinkQuery.filter(
        PostClass.discussion_id == discussion_id)
    ##no_of_posts_to_discussion = posts.count()

    post_data = []

    only_orphan = request.GET.get('only_orphan')
    if only_orphan == "true":
        if root_idea_id:
            raise HTTPBadRequest(localizer.translate(
                _("Getting orphan posts of a specific idea isn't supported.")))
        orphans = text(
            Idea._get_orphan_posts_statement(),
            bindparams=[bindparam('discussion_id', discussion_id)]
        ).columns(column('post_id')).alias('orphans')
        posts = posts.join(orphans, PostClass.id == orphans.c.post_id)
        ideaContentLinkQuery = ideaContentLinkQuery.join(
            orphans, PostClass.id == orphans.c.post_id)
    elif only_orphan == "false":
        raise HTTPBadRequest(localizer.translate(
            _("Getting non-orphan posts isn't supported.")))

    # "true" means hidden only, "false" (default) means visible only.
    # "any" means both.
    hidden = request.GET.get('hidden_messages', "false")
    if hidden != 'any':
        posts = posts.filter(PostClass.hidden == asbool(hidden))
        ideaContentLinkQuery = ideaContentLinkQuery.filter(
            PostClass.hidden == asbool(hidden))

    if root_idea_id:
        related = text(
            Idea._get_related_posts_statement(),
            bindparams=[bindparam('root_idea_id', root_idea_id),
                        bindparam('discussion_id', discussion_id)]
        ).columns(column('post_id')).alias('related')
        # Virtuoso bug: This should work...
        # posts = posts.join(related, PostClass.id==related.c.post_id)
        posts = posts.join(related, PostClass.id == related.c.post_id)
        ideaContentLinkQuery = ideaContentLinkQuery.join(
            related, PostClass.id == related.c.post_id)
    if root_post_id:
        root_post = Post.get(root_post_id)

        posts = posts.filter(
            (Post.ancestry.like(
                root_post.ancestry + cast(root_post.id, String) + ',%'
            )) |
            (PostClass.id == root_post.id)
        )
    elif family_post_id:
        root_post = Post.get(family_post_id)
        ancestor_ids = root_post.ancestor_ids()
        posts = posts.filter(
            (Post.ancestry.like(
                root_post.ancestry + cast(root_post.id, String) + ',%'
            )) |
            (PostClass.id == root_post.id) |
            (PostClass.id.in_(ancestor_ids))
        )
        ideaContentLinkQuery = ideaContentLinkQuery.filter(
            (Post.ancestry.like(
                root_post.ancestry + cast(root_post.id, String) + ',%'
            )) |
            (PostClass.id == root_post.id) |
            (PostClass.id.in_(ancestor_ids))
        )
    else:
        root_post = None

    if ids:
        posts = posts.filter(Post.id.in_(ids))
        ideaContentLinkQuery = ideaContentLinkQuery.filter(Post.id.in_(ids))

    if posted_after_date:
        posted_after_date = parse_datetime(posted_after_date)
        if posted_after_date:
            posts = posts.filter(PostClass.creation_date >= posted_after_date)
            ideaContentLinkQuery = ideaContentLinkQuery.filter(
                PostClass.creation_date >= posted_after_date)
        # Maybe we should do something if the date is invalid. benoitg

    if posted_before_date:
        posted_before_date = parse_datetime(posted_before_date)
        if posted_before_date:
            posts = posts.filter(PostClass.creation_date <= posted_before_date)
            # Bug fix: this previously filtered `posts` with a nonsensical
            # `ideaContentLinkQuery.creation_date` attribute; mirror the
            # posted_after_date branch instead.
            ideaContentLinkQuery = ideaContentLinkQuery.filter(
                PostClass.creation_date <= posted_before_date)
        # Maybe we should do something if the date is invalid. benoitg

    if post_author_id:
        posts = posts.filter(PostClass.creator_id == post_author_id)
        ideaContentLinkQuery = ideaContentLinkQuery.filter(
            PostClass.creator_id == post_author_id)

    if post_replies_to:
        parent_alias = aliased(PostClass)
        posts = posts.join(parent_alias, PostClass.parent)
        posts = posts.filter(parent_alias.creator_id == post_replies_to)
        ideaContentLinkQuery = ideaContentLinkQuery.join(
            parent_alias, PostClass.parent)
        ideaContentLinkQuery = ideaContentLinkQuery.filter(
            parent_alias.creator_id == post_replies_to)

    # Post read/unread management
    is_unread = request.GET.get('is_unread')
    translations = None
    if user_id != Everyone:
        # This is horrible, but the join creates complex subqueries that
        # virtuoso cannot decode properly.
        read_posts = {v.post_id for v in discussion.db.query(
            ViewPost).filter(
                ViewPost.tombstone_condition(),
                ViewPost.actor_id == user_id,
                *ViewPost.get_discussion_conditions(discussion_id))}
        liked_posts = {l.post_id: l.id for l in discussion.db.query(
            LikedPost).filter(
                LikedPost.tombstone_condition(),
                LikedPost.actor_id == user_id,
                *LikedPost.get_discussion_conditions(discussion_id))}
        if is_unread is not None:
            posts = posts.outerjoin(
                ViewPost, and_(
                    ViewPost.actor_id == user_id,
                    ViewPost.post_id == PostClass.id,
                    ViewPost.tombstone_date == None))
            if is_unread == "true":
                posts = posts.filter(ViewPost.id == None)
            elif is_unread == "false":
                posts = posts.filter(ViewPost.id != None)
        user = AgentProfile.get(user_id)
        service = discussion.translation_service()
        if service:
            translations = user_pref_as_translation_table(user, service)
    else:
        # If there is no user_id, all posts are always unread
        if is_unread == "false":
            raise HTTPBadRequest(localizer.translate(
                _("You must be logged in to view which posts are read")))

    if text_search is not None:
        # another Virtuoso bug: offband kills score. but it helps speed.
        offband = () if (order == 'score') else None
        posts = posts.filter(Post.body_text_index.contains(
            text_search.encode('utf-8'), offband=offband))
        ideaContentLinkQuery = ideaContentLinkQuery.filter(
            Post.body_text_index.contains(
                text_search.encode('utf-8'), offband=offband))

    # posts = posts.options(contains_eager(Post.source))
    # Horrible hack... But useful for structure load
    if view_def == 'id_only':
        pass  # posts = posts.options(defer(Post.body))
    else:
        posts = posts.options(
            # undefer(Post.idea_content_links_above_post),
            joinedload_all(Post.creator),
            joinedload_all(Post.extracts),
            joinedload_all(Post.widget_idea_links),
            joinedload_all(SynthesisPost.publishes_synthesis),
            subqueryload_all(Post.attachments))
        if len(discussion.discussion_locales) > 1:
            posts = posts.options(*Content.subqueryload_options())
        else:
            posts = posts.options(*Content.joinedload_options())

    ideaContentLinkCache = dict(ideaContentLinkQuery.all())

    if order == 'chronological':
        posts = posts.order_by(Content.creation_date)
    elif order == 'reverse_chronological':
        posts = posts.order_by(Content.creation_date.desc())
    elif order == 'score':
        posts = posts.order_by(Content.body_text_index.score_name.desc())
    else:
        posts = posts.order_by(Content.id)
    # (debug `print str(posts)` removed)

    no_of_posts = 0
    no_of_posts_viewed_by_user = 0

    for query_result in posts:
        score, viewpost, likedpost = None, None, None
        if not isinstance(query_result, (list, tuple)):
            query_result = [query_result]
        post = query_result[0]
        if user_id != Everyone:
            viewpost = post.id in read_posts
            likedpost = liked_posts.get(post.id, None)
            if view_def != "id_only":
                translate_content(
                    post, translation_table=translations, service=service)
        no_of_posts += 1
        serializable_post = post.generic_json(
            view_def, user_id, permissions) or {}
        if order == 'score':
            score = query_result[1]
            serializable_post['score'] = score

        if viewpost:
            serializable_post['read'] = True
            no_of_posts_viewed_by_user += 1
        elif user_id != Everyone and root_post is not None \
                and root_post.id == post.id:
            # Mark post read, we requested it explicitely
            viewed_post = ViewPost(
                actor_id=user_id,
                post=root_post
            )
            discussion.db.add(viewed_post)
            serializable_post['read'] = True
        else:
            serializable_post['read'] = False
        # serializable_post['liked'] = likedpost.uri() if likedpost else False
        serializable_post['liked'] = (
            LikedPost.uri_generic(likedpost) if likedpost else False)
        if view_def != "id_only":
            serializable_post['indirect_idea_content_links'] = (
                post.indirect_idea_content_links_with_cache(
                    ideaContentLinkCache.get(post.id, None)))

        post_data.append(serializable_post)

    # Benoitg: For now, this completely garbles threading without intelligent
    # handling of pagination. Disabling
    # posts = posts.limit(page_size).offset(data['startIndex']-1)
    # This code isn't up to date. If limiting the query by page, we need to
    # calculate the counts with a separate query to have the right number of
    # results
    # no_of_messages_viewed_by_user = discussion.db.query(ViewPost).join(
    #     Post
    # ).filter(
    #     Post.discussion_id == discussion_id,
    #     ViewPost.actor_id == user_id,
    # ).count() if user_id else 0

    data = {}
    data["page"] = page
    data["unread"] = no_of_posts - no_of_posts_viewed_by_user
    data["total"] = no_of_posts
    data["maxPage"] = max(1, ceil(float(data["total"]) / page_size))
    # TODO: Check if we want 1 based index in the api
    data["startIndex"] = (page_size * page) - (page_size - 1)

    if data["page"] == data["maxPage"]:
        data["endIndex"] = data["total"]
    else:
        data["endIndex"] = data["startIndex"] + (page_size - 1)
    data["posts"] = post_data

    return data
def reset_password(request):
    """Initiate a password reset, raising JSON errors on failure.

    The user may be designated either by ``user_id`` or by ``identifier``
    (an email) in the JSON body.  When the discussion's
    ``generic_errors`` preference is set, failure responses use a generic
    message instead of a specific one, so the endpoint cannot be used to
    probe which accounts exist; the specific cause is still logged.

    :raises JSONError: on missing identifier, unknown user/email, user
        without email, or non-User profile.
    """
    identifier = request.json_body.get('identifier')
    user_id = request.json_body.get('user_id')
    slug = request.json_body.get('discussion_slug')
    logger = logging.getLogger()
    discussion = None
    if slug:
        discussion = Discussion.default_db.query(
            Discussion).filter_by(slug=slug).first()
    email = None
    user = None
    localizer = request.localizer
    # Guard against discussion being None (no slug given, or unknown slug):
    # in that case fall back to detailed errors rather than crashing on
    # discussion.preferences.
    show_details = (
        discussion is None or not discussion.preferences['generic_errors'])
    if user_id:
        user = AgentProfile.get(int(user_id))
        if not user:
            # Log before raising: statements after `raise` never run.
            logger.error(
                "[Password reset] The user with the identifier %s does not exist"
                % (identifier,))
            if show_details:
                raise JSONError(
                    localizer.translate(_("The user does not exist")),
                    code=HTTPNotFound.code)
            raise JSONError(
                localizer.translate(generic_error_message),
                code=HTTPNotFound.code)
        if identifier:
            # Only accept the given identifier as the target email if it
            # actually belongs to one of the user's accounts.
            for account in user.accounts:
                if identifier == account.email:
                    email = identifier
                    break
    elif identifier:
        user, account = from_identifier(identifier)
        if not user:
            logger.error("This email does not exist.")
            if show_details:
                raise JSONError(
                    localizer.translate(_("This email does not exist")),
                    code=HTTPNotFound.code)
            raise JSONError(
                localizer.translate(_(generic_error_message)),
                code=HTTPNotFound.code)
        if account:
            email = account.email
    else:
        raise JSONError(localizer.translate(_("Please give an identifier")))
    if not email:
        email = user.get_preferred_email()
    if not email:
        logger.error("This user has no email.")
        if show_details:
            error = localizer.translate(_("This user has no email"))
        else:
            error = localizer.translate(_(generic_error_message))
        raise JSONError(error, code=HTTPPreconditionFailed.code)
    if not isinstance(user, User):
        logger.error("This is not a user.")
        if show_details:
            error = localizer.translate(_("This is not a user"))
        else:
            error = localizer.translate(_(generic_error_message))
        raise JSONError(error, code=HTTPPreconditionFailed.code)
    send_change_password_email(request, user, email, discussion=discussion)
    return HTTPOk()