def discussion_as_graph(self, discussion_id):
    from assembl.models import Discussion, AgentProfile
    local_uri = self.local_uri()
    discussion = Discussion.get(discussion_id)
    d_storage_name = self.discussion_storage_name()
    d_graph_iri = URIRef(self.discussion_graph_iri())
    v = get_virtuoso(self.session, d_storage_name)
    discussion_uri = URIRef(
        Discussion.uri_generic(discussion_id, local_uri))
    subjects = [s for (s,) in v.query(
        """SELECT DISTINCT ?s WHERE {
        ?s assembl:in_conversation %s }""" % (discussion_uri.n3()))]
    subjects.append(discussion_uri)
    participant_ids = list(discussion.get_participants(True))
    profiles = {URIRef(AgentProfile.uri_generic(id, local_uri))
                for id in participant_ids}
    subjects.extend(profiles)
    # add pseudo-accounts
    subjects.extend((URIRef("%sAgentAccount/%d" % (local_uri, id))
                     for id in participant_ids))
    # print len(subjects)
    cg = ConjunctiveGraph(identifier=d_graph_iri)
    self.add_subject_data(v, cg, subjects)
    # add relationships of non-pseudo accounts
    for ((account, p, profile), g) in v.triples(
            (None, SIOC.account_of, None)):
        if profile in profiles:
            cg.add((account, SIOC.account_of, profile, g))
            # Tempting: simplify with this.
            # cg.add((profile, FOAF.account, account, g))
    for (s, o, g) in v.query(
            '''SELECT ?s ?o ?g WHERE {
            GRAPH ?g {?s catalyst:expressesIdea ?o } .
            ?o assembl:in_conversation %s }''' % (discussion_uri.n3())):
        cg.add((s, CATALYST.expressesIdea, o, g))
    return cg
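# Usage sketch (not from the source): the ConjunctiveGraph returned above
# keeps its named-graph contexts, so a quad-aware rdflib format such as TriG
# is needed to round-trip it; plain Turtle would flatten the contexts away.
# `semantic_obj` is a placeholder for whatever object exposes this method.
cg = semantic_obj.discussion_as_graph(discussion_id)
print cg.serialize(format='trig')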
def discussion_from_request(request):
    from ..models import Discussion
    from assembl.views.traversal import TraversalContext
    if request.matchdict:
        if 'discussion_id' in request.matchdict:
            discussion_id = int(request.matchdict['discussion_id'])
            discussion = Discussion.get_instance(discussion_id)
            if not discussion:
                raise HTTPNotFound("No discussion ID %d" % (discussion_id,))
            return discussion
        elif 'discussion_slug' in request.matchdict:
            slug = request.matchdict['discussion_slug']
            discussion = find_discussion_from_slug(slug)
            if not discussion:
                raise HTTPNotFound(
                    "No discussion found for slug=%s" % (slug,))
            return discussion
    if getattr(request, "context", None) and isinstance(
            request.context, TraversalContext):
        discussion_id = request.context.get_discussion_id()
        if discussion_id:
            return Discussion.get(discussion_id)
    if request.session.get("discussion", None):
        slug = request.session["discussion"]
        discussion = find_discussion_from_slug(slug)
        if not discussion:
            raise HTTPNotFound("No discussion found for slug=%s" % (slug,))
        return discussion
def delete_discussion(session, discussion_id):
    from assembl.models import Discussion, DiscussionBoundBase
    # First, delete the discussion.
    session.delete(Discussion.get(discussion_id))
    session.flush()
    # See if anything is left...
    classes = DiscussionBoundBase._decl_class_registry.itervalues()
    classes_by_table = {
        cls.__dict__.get('__table__', None): cls for cls in classes}
    # Only direct subclass of abstract
    concrete_classes = set(filter(
        lambda cls: issubclass(cls, DiscussionBoundBase)
        and (not isabstract(cls)) and isabstract(cls.mro()[1]),
        classes_by_table.values()))
    tables = DiscussionBoundBase.metadata.sorted_tables
    tables.reverse()
    for table in tables:
        if table not in classes_by_table:
            continue
        cls = classes_by_table[table]
        if cls not in concrete_classes:
            continue
        print 'deleting', cls.__name__
        query = session.query(cls.id)
        conds = cls.get_discussion_conditions(discussion_id)
        assert conds
        cond = and_(*conds)
        v = JoinColumnsVisitor(cls, query, classes_by_table)
        v.traverse(cond)
        query = v.final_query().filter(cond)
        if query.count():
            print "*" * 20, "Not all deleted!"
            session.query(cls).filter(
                cls.id.in_(query.subquery())).delete(False)
def upload_file(request):
    """
    POSTing a file upload is very different from any other endpoint in the
    assembl API, because all of the content is passed in using a
    MULTIPART_HEADER, with the data as well as the file (along with its
    metadata).
    """
    # Testing purposes on front-end
    # raise Exception("Upload file exception occurred!")
    db = Document.default_db
    ctx = request.context
    user_id = authenticated_userid(request) or Everyone
    discussion_id = ctx.get_discussion_id()
    discussion = Discussion.get(discussion_id)

    mime = request.POST['mime_type']
    file_name = request.POST['name']

    # Check if the file has previously existed; if so, change the name by
    # appending "(n)" to it
    try:
        blob = File(discussion=discussion, mime_type=mime, title=file_name)
        db.add(blob)
        with request.POST['file'].file as f:
            blob.add_file_data(f)
        db.flush()
    except Exception as e:
        raise HTTPServerError(e)

    view = 'default'
    return blob.generic_json(view, user_id, ctx.get_permissions())
def get_ideas(request):
    discussion_id = request.matchdict['discussion_id']
    discussion = Discussion.get(id=int(discussion_id))
    if not discussion:
        raise HTTPNotFound(
            "Discussion with id '%s' not found." % discussion_id)
    view_def = request.GET.get('view')
    ids = request.GET.getall('ids')

    return _get_ideas_real(discussion=discussion, view_def=view_def, ids=ids)
def get_agents(request, discussion_only=False):
    discussion_id = int(request.matchdict['discussion_id'])
    discussion = Discussion.get(int(discussion_id))
    if not discussion:
        raise HTTPNotFound(
            "Discussion with id '%s' not found." % discussion_id)
    view_def = request.GET.get('view')
    return _get_agents_real(
        discussion, request.authenticated_userid, view_def)
def get_agents(request, discussion_only=False):
    discussion_id = int(request.matchdict['discussion_id'])
    discussion = Discussion.get(int(discussion_id))
    if not discussion:
        raise HTTPNotFound(
            "Discussion with id '%s' not found." % discussion_id)
    view_def = request.GET.get('view')
    return _get_agents_real(
        discussion, authenticated_userid(request), view_def)
def get_ideas(request):
    user_id = authenticated_userid(request) or Everyone
    discussion_id = int(request.matchdict["discussion_id"])
    discussion = Discussion.get(int(discussion_id))
    if not discussion:
        raise HTTPNotFound(
            "Discussion with id '%s' not found." % discussion_id)
    view_def = request.GET.get("view")
    ids = request.GET.getall("ids")

    return _get_ideas_real(
        discussion=discussion, view_def=view_def, ids=ids, user_id=user_id)
def add_local_role(request):
    # Do not use check_permissions, this is a special case
    ctx = request.context
    user_id = request.authenticated_userid
    if not user_id:
        raise HTTPUnauthorized()
    discussion_id = ctx.get_discussion_id()
    # Check for a missing discussion before dereferencing it.
    if discussion_id is None:
        raise HTTPBadRequest()
    discussion = Discussion.get(discussion_id)
    user_uri = User.uri_generic(user_id)
    permissions = get_permissions(user_id, discussion_id)
    json = request.json_body
    if "discussion" not in json:
        json["discussion"] = Discussion.uri_generic(discussion_id)
    requested_user = json.get('user', None)
    if not requested_user:
        json['user'] = requested_user = user_uri
    elif requested_user != user_uri and P_ADMIN_DISC not in permissions:
        raise HTTPUnauthorized()
    if P_ADMIN_DISC not in permissions:
        if P_SELF_REGISTER in permissions:
            json['requested'] = False
            json['role'] = R_PARTICIPANT
            req_user = User.get_instance(requested_user)
            if not discussion.check_authorized_email(req_user):
                raise HTTPForbidden()
        elif P_SELF_REGISTER_REQUEST in permissions:
            json['requested'] = True
        else:
            raise HTTPUnauthorized()
    try:
        instances = ctx.create_object("LocalUserRole", json, user_id)
    except HTTPClientError as e:
        raise e
    except Exception as e:
        raise HTTPBadRequest(e)
    if instances:
        first = instances[0]
        db = first.db
        for instance in instances:
            db.add(instance)
        db.flush()
        # Side effect: materialize subscriptions.
        if not first.requested:
            # relationship may not be initialized
            user = first.user or User.get(first.user_id)
            user.get_notification_subscriptions(discussion_id, True)
            # Update the user's AgentStatusInDiscussion
            user.update_agent_status_subscribe(discussion)
        view = request.GET.get('view', None) or 'default'
        permissions = get_permissions(user_id, ctx.get_discussion_id())
        return CreationResponse(first, user_id, permissions, view)
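# Illustrative request body for the endpoint above, reconstructed from the
# keys the code reads (not taken verbatim from API docs). "user" and
# "discussion" default to the authenticated user and the context discussion
# when omitted; "requested" and "role" are overwritten for non-admins:
# {
#     "discussion": "local:Discussion/6",
#     "user": "local:AgentProfile/42",
#     "role": "r:participant",
#     "requested": false
# }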
def get_ideas(request):
    user_id = authenticated_userid(request) or Everyone
    discussion_id = int(request.matchdict['discussion_id'])
    discussion = Discussion.get(int(discussion_id))
    if not discussion:
        raise HTTPNotFound(
            "Discussion with id '%s' not found." % discussion_id)
    view_def = request.GET.get('view')
    ids = request.GET.getall('ids')

    return _get_ideas_real(
        discussion=discussion, view_def=view_def, ids=ids, user_id=user_id)
def __acl__(self):
    if getattr(self._instance, '__acl__', None):
        return self._instance.__acl__
    if getattr(self._instance, 'discussion', None):
        return self._instance.discussion.__acl__
    discussion_id = self.get_discussion_id()
    if discussion_id:
        from assembl.models import Discussion
        return Discussion.get(discussion_id).__acl__
    return self.__parent__.__acl__
def add_local_role(request):
    # Do not use check_permissions, this is a special case
    ctx = request.context
    user_id = request.authenticated_userid
    if not user_id:
        raise HTTPUnauthorized()
    discussion_id = ctx.get_discussion_id()
    # Check for a missing discussion before dereferencing it.
    if discussion_id is None:
        raise HTTPBadRequest()
    discussion = Discussion.get(discussion_id)
    user_uri = User.uri_generic(user_id)
    permissions = get_permissions(user_id, discussion_id)
    json = request.json_body
    if "discussion" not in json:
        json["discussion"] = Discussion.uri_generic(discussion_id)
    requested_user = json.get('user', None)
    if not requested_user:
        json['user'] = requested_user = user_uri
    elif requested_user != user_uri and P_ADMIN_DISC not in permissions:
        raise HTTPUnauthorized()
    if P_ADMIN_DISC not in permissions:
        if P_SELF_REGISTER in permissions:
            json['requested'] = False
            json['role'] = R_PARTICIPANT
            req_user = User.get_instance(requested_user)
            if not discussion.check_authorized_email(req_user):
                raise HTTPForbidden()
        elif P_SELF_REGISTER_REQUEST in permissions:
            json['requested'] = True
        else:
            raise HTTPUnauthorized()
    try:
        instances = ctx.create_object("LocalUserRole", json, user_id)
    except HTTPClientError as e:
        raise e
    except Exception as e:
        raise HTTPBadRequest(e)
    if instances:
        first = instances[0]
        db = first.db
        for instance in instances:
            db.add(instance)
        db.flush()
        # Side effect: materialize subscriptions.
        if not first.requested:
            # relationship may not be initialized
            user = first.user or User.get(first.user_id)
            user.get_notification_subscriptions(discussion_id, True)
            # Update the user's AgentStatusInDiscussion
            user.update_agent_status_subscribe(discussion)
        view = request.GET.get('view', None) or 'default'
        permissions = get_permissions(user_id, ctx.get_discussion_id())
        return CreationResponse(first, user_id, permissions, view)
def get_extracts(request):
    discussion_id = int(request.matchdict['discussion_id'])
    discussion = Discussion.get(int(discussion_id))
    if not discussion:
        raise HTTPNotFound(
            "Discussion with id '%s' not found." % discussion_id)
    view_def = request.GET.get('view')
    ids = request.GET.getall('ids')

    return _get_extracts_real(
        discussion, view_def, ids, authenticated_userid(request))
def get_extracts(request):
    discussion_id = int(request.matchdict['discussion_id'])
    discussion = Discussion.get(int(discussion_id))
    if not discussion:
        raise HTTPNotFound(
            "Discussion with id '%s' not found." % discussion_id)
    view_def = request.GET.get('view')
    ids = request.GET.getall('ids')

    return _get_extracts_real(
        discussion, view_def, ids, request.authenticated_userid)
def get_syntheses(request):
    discussion_id = int(request.matchdict['discussion_id'])
    discussion = Discussion.get(int(discussion_id))
    if not discussion:
        raise HTTPNotFound(
            "Discussion with id '%s' not found." % discussion_id)
    user_id = authenticated_userid(request) or Everyone
    permissions = get_permissions(user_id, discussion_id)
    syntheses = discussion.get_all_syntheses()
    view_def = request.GET.get('view') or 'default'
    res = [synthesis.generic_json(view_def, user_id, permissions)
           for synthesis in syntheses]
    return [x for x in res if x is not None]
def get_discussion_semantic_analysis(
        discussion_id, num_topics=200,
        model_cls=gmodels.lsimodel.LsiModel, **model_kwargs):
    discussion = Discussion.get(discussion_id)
    lang = discussion.discussion_locales[0].split('_')[0]
    dirname = join(nlp_data, lang)
    dict_fname = join(dirname, DICTIONARY_FNAME)
    # rebuild dico in all cases to ensure complete corpus
    corpus = create_dictionaries(discussion_id)
    dictionary = corpora.Dictionary.load(dict_fname)
    post_ids = discussion.db.query(Content.id).filter_by(
        discussion_id=discussion_id)
    doc_count = post_ids.count()
    if doc_count < 10:
        # Return a tuple of the same arity as the success path.
        return None, None, None
    post_ids = [x for (x,) in post_ids]
    subcorpus = corpus[post_ids]
    tfidf_model = gmodels.TfidfModel(id2word=dictionary)
    tfidf_fname = join(dirname, "tfidf_%d.model" % (discussion_id,))
    model_fname = join(dirname, "model_%s_%d.model" % (
        model_cls.__name__, discussion_id,))
    gensim_model = model_cls(
        id2word=dictionary, num_topics=num_topics, **model_kwargs)
    if exists(tfidf_fname):
        tfidf_model = tfidf_model.load(tfidf_fname)
        # assumption: count implies identity.
        # Wrong in corner cases: hidden, etc.
        if tfidf_model.num_docs == doc_count:
            if exists(model_fname):
                gensim_model = gensim_model.load(model_fname)
                same_kwargs = all((
                    getattr(gensim_model, k) == v
                    for (k, v) in model_kwargs.iteritems()))
                same_kwargs = same_kwargs and getattr(
                    gensim_model, 'num_updates', doc_count) == doc_count
                if same_kwargs and gensim_model.num_topics == num_topics:
                    return (subcorpus, tfidf_model, gensim_model)
        elif exists(tfidf_fname):
            unlink(tfidf_fname)
    # The cached model (if any) is stale or mismatched; discard it.
    if exists(model_fname):
        unlink(model_fname)
    if tfidf_model.num_docs != doc_count:
        tfidf_model.initialize(subcorpus)
        tfidf_model.save(tfidf_fname)
    tfidf_corpus = tfidf_model[subcorpus]
    if getattr(gensim_model, 'update', None):
        gensim_model.update(tfidf_corpus)
    elif getattr(gensim_model, 'add_documents', None):
        gensim_model.add_documents(tfidf_corpus)
    gensim_model.save(model_fname)
    return (subcorpus, tfidf_model, gensim_model)
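# Usage sketch (assumption: run inside an assembl shell with the models
# bound to a database session). show_topics() is standard gensim LsiModel
# API; the unpacking matches the 3-tuple returned above.
subcorpus, tfidf_model, gensim_model = get_discussion_semantic_analysis(
    discussion_id=6, num_topics=50)
if gensim_model is not None:
    for topic in gensim_model.show_topics(num_topics=10):
        print topic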
def maybe_contextual_route(request, route_name, **args):
    discussion_slug = request.matchdict.get('discussion_slug', None)
    if discussion_slug is None:
        discussion_id = request.matchdict.get('discussion_id', None)
        if discussion_id is None:
            return request.route_url(route_name, **args)
        else:
            discussion = Discussion.get(int(discussion_id))
            return request.route_url(
                'contextual_' + route_name,
                discussion_slug=discussion.slug, **args)
    else:
        return request.route_url(
            'contextual_' + route_name,
            discussion_slug=discussion_slug, **args)
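# Usage sketch: given a matchdict containing either discussion_slug or
# discussion_id, this resolves to the 'contextual_'-prefixed variant of a
# route when possible, else the plain route. 'login' below is a
# hypothetical route name used only for illustration:
# url = maybe_contextual_route(request, 'login', _query={'next': '/'})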
def add_multiple_users_csv(
        request, csv_file, discussion_id, with_role,
        send_password_change=False, message_subject=None,
        text_message=None, html_message=None):
    r = reader(csv_file)
    localizer = request.localizer
    for i, l in enumerate(r):
        if not len(l):
            # tolerate empty lines
            continue
        l = [x.decode('utf-8').strip() for x in l]
        if send_password_change:
            if len(l) != 2:
                raise RuntimeError(localizer.translate(_(
                    "The CSV file must have two columns")))
            (name, email) = l
            password = base64.urlsafe_b64encode(urandom(8))
        else:
            if len(l) != 3:
                raise RuntimeError(localizer.translate(_(
                    "The CSV file must have three columns")))
            (name, email, password) = l
        if not is_email(email):
            if i == 0:
                # Header
                continue
            raise RuntimeError(localizer.translate(_(
                "Not an email: <%s> at line %d")) % (email, i))
        if len(name) < 5:
            raise RuntimeError(localizer.translate(_(
                "Name too short: <%s> at line %d")) % (name, i))
        if len(password) < 4:
            raise RuntimeError(localizer.translate(_(
                "Password too short: <%s> at line %d")) % (password, i))
        (user, is_new) = add_user(
            name, email, password, None, True, localrole=with_role,
            discussion=discussion_id, change_old_password=False)
        if is_new and send_password_change:
            from assembl.views.auth.views import send_change_password_email
            from assembl.models import Discussion
            discussion = Discussion.get(discussion_id)
            send_change_password_email(
                request, user, email, subject=message_subject,
                text_body=text_message, html_body=html_message,
                discussion=discussion)
    return i
def add_multiple_users_csv(
        request, csv_file, discussion_id, with_role,
        send_password_change=False, message_subject=None,
        text_message=None, html_message=None, sender_name=None,
        resend_if_not_logged_in=False):
    csv_file = TextIOWrapper(BytesIO(csv_file.read()), 'utf-8')
    r = reader(csv_file, skipinitialspace=True)
    localizer = request.localizer
    for i, l in enumerate(r):
        if not len(l):
            # tolerate empty lines
            continue
        l = [x.strip() for x in l]
        if len(l) != 2:
            raise RuntimeError(localizer.translate(_(
                "The CSV file must have two columns")))
        (name, email) = l
        if not is_email(email):
            if i == 0:
                # Header
                continue
            raise RuntimeError(localizer.translate(_(
                "Not an email: <%s> at line %d")) % (email, i))
        if len(name) < 5:
            raise RuntimeError(localizer.translate(_(
                "Name too short: <%s> at line %d")) % (name, i))
        (user, created_user, created_localrole) = add_user(
            name, email, None, None, True, localrole=with_role,
            discussion=discussion_id, change_old_password=False)
        status_in_discussion = None
        if send_password_change and not (created_user or created_localrole):
            status_in_discussion = user.get_status_in_discussion(
                discussion_id)
        if send_password_change and (
                created_user or created_localrole or (
                    resend_if_not_logged_in and (
                        status_in_discussion is None or
                        not status_in_discussion.first_visit))):
            from assembl.views.auth.views import send_change_password_email
            from assembl.models import Discussion
            discussion = Discussion.get(discussion_id)
            send_change_password_email(
                request, user, email, subject=message_subject,
                text_body=text_message, html_body=html_message,
                discussion=discussion, sender_name=sender_name, welcome=True)
    return i
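# Expected CSV input for the function above: two columns, name then email,
# with an optional header row (the header is tolerated only because its
# second column fails the is_email() check on line 0). For example:
#
#     Jane Example,jane@example.com
#     John Example,john@example.com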
def participants_private_as_graph(self, discussion_id):
    from assembl.models import Discussion, AgentProfile
    local_uri = self.local_uri()
    discussion = Discussion.get(discussion_id)
    d_storage_name = self.private_user_storage.name
    d_graph_iri = self.private_user_storage.sections[0].graph_iri
    cg = ConjunctiveGraph(identifier=d_graph_iri)
    v = get_virtuoso(self.session, d_storage_name)
    v_main = get_virtuoso(self.session, self.discussion_storage_name())
    participant_ids = discussion.get_participants(True)
    profiles = {URIRef(AgentProfile.uri_generic(id, local_uri))
                for id in participant_ids}
    self.add_subject_data(v, cg, profiles)
    accounts = [account for ((account, p, profile), g)
                in v_main.triples((None, SIOC.account_of, None))
                if profile in profiles]
    self.add_subject_data(v, cg, accounts)
    return cg
def add_multiple_users_csv(
        request, csv_file, discussion_id, with_role,
        send_password_change=False, message_subject=None,
        text_message=None, html_message=None, sender_name=None,
        resend_if_not_logged_in=False):
    r = reader(csv_file, skipinitialspace=True)
    localizer = request.localizer
    for i, row in enumerate(r):
        if not len(row):
            # tolerate empty lines
            continue
        row = [x.decode('utf-8').strip() for x in row]
        if len(row) != 2:
            raise RuntimeError(localizer.translate(_(
                "The CSV file must have two columns")))
        (name, email) = row
        if not is_email(email):
            if i == 0:
                # Header
                continue
            raise RuntimeError(localizer.translate(_(
                "Not an email: <%s> at line %d")) % (email, i))
        if len(name) < 5:
            raise RuntimeError(localizer.translate(_(
                "Name too short: <%s> at line %d")) % (name, i))
        (user, created_user, created_localrole) = add_user(
            name, email, None, None, True, localrole=with_role,
            discussion=discussion_id, change_old_password=False)
        status_in_discussion = None
        if send_password_change and not (created_user or created_localrole):
            status_in_discussion = user.get_status_in_discussion(
                discussion_id)
        if send_password_change and (
                created_user or created_localrole or (
                    resend_if_not_logged_in and (
                        status_in_discussion is None or
                        not status_in_discussion.first_visit))):
            from assembl.views.auth.views import send_change_password_email
            from assembl.models import Discussion
            discussion = Discussion.get(discussion_id)
            send_change_password_email(
                request, user, email, subject=message_subject,
                text_body=text_message, html_body=html_message,
                discussion=discussion, sender_name=sender_name, welcome=True)
    return i
def interesting_ideas(request):
    from .discussion import get_analytics_alerts
    ctx = request.context
    target = request.context._instance
    user_id = request.authenticated_userid or Everyone
    discussion_id = ctx.get_discussion_id()
    permissions = get_permissions(user_id, discussion_id)
    if P_READ not in permissions:
        raise HTTPUnauthorized()
    if user_id != target.id and P_ADMIN_DISC not in permissions:
        raise HTTPUnauthorized()
    discussion = Discussion.get(discussion_id)
    if not discussion:
        raise HTTPNotFound()
    result = get_analytics_alerts(
        discussion, target.id, ["interesting_to_me"], False)
    result = loads(result)['responses'][0]['data'][0]['suggestions']
    result = {x['targetID']: x['arguments']['score'] for x in result}
    return result
def interesting_ideas(request):
    from .discussion import get_analytics_alerts
    ctx = request.context
    target = request.context._instance
    user_id = authenticated_userid(request) or Everyone
    discussion_id = ctx.get_discussion_id()
    permissions = get_permissions(user_id, discussion_id)
    if P_READ not in permissions:
        raise HTTPUnauthorized()
    if user_id != target.id and P_ADMIN_DISC not in permissions:
        raise HTTPUnauthorized()
    discussion = Discussion.get(discussion_id)
    if not discussion:
        raise HTTPNotFound()
    result = get_analytics_alerts(
        discussion, target.id, ["interesting_to_me"], False)
    result = loads(result)['responses'][0]['data'][0]['suggestions']
    result = {x['targetID']: x['arguments']['score'] for x in result}
    return result
def delete_local_role(request):
    ctx = request.context
    instance = ctx._instance
    user_id = request.authenticated_userid
    if not user_id:
        raise HTTPUnauthorized()
    discussion_id = ctx.get_discussion_id()
    if discussion_id is None:
        raise HTTPBadRequest()
    permissions = get_permissions(user_id, discussion_id)
    requested_user = instance.user
    if requested_user.id != user_id and P_ADMIN_DISC not in permissions:
        raise HTTPUnauthorized()
    user = User.get(user_id)
    discussion = Discussion.get(discussion_id)
    instance.db.delete(instance)
    # Update the user's AgentStatusInDiscussion
    user.update_agent_status_unsubscribe(discussion)
    instance.db.flush()  # maybe unnecessary
    return {}
def create_post(request):
    """
    We use POST, not PUT, because we don't know the id of the post.
    """
    request_body = json.loads(request.body)
    user_id = authenticated_userid(request)
    if not user_id:
        raise HTTPUnauthorized()

    user = Post.db.query(User).filter_by(id=user_id).one()

    message = request_body.get('message', None)
    html = request_body.get('html', None)
    reply_id = request_body.get('reply_id', None)
    subject = request_body.get('subject', None)

    if not message:
        raise HTTPUnauthorized()

    if reply_id:
        post = Post.get_instance(reply_id)
        post.content.reply(user, message)
        return {"ok": True}

    discussion_id = request.matchdict['discussion_id']
    discussion = Discussion.get(id=int(discussion_id))
    if not discussion:
        raise HTTPNotFound(
            _("No discussion found with id=%s" % discussion_id))
    # Only dereference the discussion once we know it exists.
    subject = subject or discussion.topic

    for source in discussion.sources:
        source.send(user, message, subject=subject, html_body=html)

    return {"ok": True}
def set_local_role(request):
    # Do not use check_permissions, this is a special case
    ctx = request.context
    instance = ctx._instance
    user_id = request.authenticated_userid
    if not user_id:
        raise HTTPUnauthorized()
    discussion_id = ctx.get_discussion_id()
    user_uri = User.uri_generic(user_id)
    if discussion_id is None:
        raise HTTPBadRequest()
    permissions = get_permissions(user_id, discussion_id)
    json = request.json_body
    requested_user = json.get('user', None)
    if not requested_user:
        json['user'] = requested_user = user_uri
    elif requested_user != user_uri and P_ADMIN_DISC not in permissions:
        raise HTTPUnauthorized()
    if P_ADMIN_DISC not in permissions:
        if P_SELF_REGISTER in permissions:
            json['requested'] = False
            json['role'] = R_PARTICIPANT
        elif P_SELF_REGISTER_REQUEST in permissions:
            json['requested'] = True
        else:
            raise HTTPUnauthorized()
    updated = instance.update_from_json(json, user_id, ctx)
    view = request.GET.get('view', None) or 'default'
    # Update the user's AgentStatusInDiscussion
    user = User.get(user_id)
    discussion = Discussion.get(discussion_id)
    user.update_agent_status_subscribe(discussion)
    if view == 'id_only':
        return [updated.uri()]
    else:
        return updated.generic_json(view, user_id, permissions)
def delete_discussion(session, discussion_id):
    from assembl.models import Discussion, DiscussionBoundBase
    # delete anything related first
    classes = DiscussionBoundBase._decl_class_registry.itervalues()
    classes_by_table = {
        cls.__dict__.get('__table__', None): cls for cls in classes
        if isinstance(cls, type)}
    # Only direct subclass of abstract
    concrete_classes = set(filter(
        lambda cls: issubclass(cls, DiscussionBoundBase)
        and (not isabstract(cls)) and isabstract(cls.mro()[1]),
        classes_by_table.values()))
    tables = DiscussionBoundBase.metadata.sorted_tables
    tables.reverse()
    for table in tables:
        if table not in classes_by_table:
            continue
        cls = classes_by_table[table]
        if cls not in concrete_classes:
            continue
        print 'deleting', cls.__name__
        query = session.query(cls.id)
        conds = cls.get_discussion_conditions(discussion_id)
        assert conds
        cond = and_(*conds)
        v = JoinColumnsVisitor(cls, query, classes_by_table)
        v.traverse(cond)
        query = v.final_query().filter(cond)
        if query.count():
            print "*" * 20, "Not all deleted!"
            session.query(cls).filter(
                cls.id.in_(query.subquery())).delete(False)
    session.flush()
    # Then, delete the discussion.
    session.delete(Discussion.get(discussion_id))
    session.flush()
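# Invocation sketch. Tables are visited in reverse of sorted_tables so that
# dependents go before their dependencies. The session-factory import path
# below is an assumption; adapt if running outside the assembl app.
from assembl.lib.sqla import get_session_maker  # assumed helper location
session = get_session_maker()()
delete_discussion(session, 6)
session.commit()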
def upload_file(request):
    """
    POSTing a file upload is very different from any other endpoint in the
    assembl API, because all of the content is passed in using a
    MULTIPART_HEADER, with the data as well as the file (along with its
    metadata).
    """
    # Testing purposes on front-end
    # raise Exception("Upload file exception occurred!")
    db = Document.default_db
    ctx = request.context
    user_id = request.authenticated_userid or Everyone
    discussion_id = ctx.get_discussion_id()
    discussion = Discussion.get(discussion_id)
    permissions = get_permissions(user_id, discussion_id)

    mime = request.POST['mime_type']
    file_name = request.POST['name']

    # Check if the file has previously existed; if so, change the name by
    # appending "(n)" to it
    try:
        blob = File(discussion=discussion, mime_type=mime, title=file_name)
        db.add(blob)
        with request.POST['file'].file as f:
            blob.add_file_data(f)
        db.flush()
    except Exception as e:
        raise HTTPServerError(e)

    view = 'default'
    return blob.generic_json(view, user_id, permissions)
def get_posts(request):
    """
    Query interface on posts
    Filters have two forms:
    only_*, is for filters that cannot be reversed (ex: only_synthesis,
    only_orphan)
    is_*, is for filters that can be reversed (ex: is_unread=true returns
    only unread messages, is_unread=false returns only read messages)
    order: can be chronological, reverse_chronological, popularity
    root_post_id: all posts below the one specified.
    family_post_id: all posts below the one specified, and all its ancestors.
    post_replies_to: replies to a given post
    root_idea_id: all posts associated with the given idea
    ids: explicit message ids.
    posted_after_date, posted_before_date: date selection (ISO format)
    post_author: filter by author
    classifier: filter on message_classifier, or absence thereof
    (classifier=null). Can be negated with "!"
    """
    localizer = request.localizer
    discussion_id = int(request.matchdict['discussion_id'])
    discussion = Discussion.get(int(discussion_id))
    if not discussion:
        raise HTTPNotFound(localizer.translate(
            _("No discussion found with id=%s")) % discussion_id)

    discussion.import_from_sources()

    user_id = authenticated_userid(request) or Everyone
    permissions = get_permissions(user_id, discussion_id)

    DEFAULT_PAGE_SIZE = 25
    page_size = DEFAULT_PAGE_SIZE

    filter_names = [
        filter_name for filter_name
        in request.GET.getone('filters').split(',')
        if filter_name
    ] if request.GET.get('filters') else []

    try:
        page = int(request.GET.getone('page'))
    except (ValueError, KeyError):
        page = 1

    text_search = request.GET.get('text_search', None)

    order = request.GET.get('order')
    if order is None:
        order = 'chronological'
    assert order in ('chronological', 'reverse_chronological', 'score',
                     'popularity')
    if order == 'score':
        assert text_search is not None

    if page < 1:
        page = 1

    root_post_id = request.GET.getall('root_post_id')
    if root_post_id:
        root_post_id = get_database_id("Post", root_post_id[0])
    family_post_id = request.GET.getall('family_post_id')
    if family_post_id:
        family_post_id = get_database_id("Post", family_post_id[0])

    root_idea_id = request.GET.getall('root_idea_id')
    if root_idea_id:
        root_idea_id = get_database_id("Idea", root_idea_id[0])

    ids = request.GET.getall('ids[]')
    if ids:
        ids = [get_database_id("Post", id) for id in ids]

    view_def = request.GET.get('view') or 'default'

    only_synthesis = request.GET.get('only_synthesis')

    post_author_id = request.GET.get('post_author')
    if post_author_id:
        post_author_id = get_database_id("AgentProfile", post_author_id)
        assert AgentProfile.get(post_author_id), \
            "Unable to find agent profile with id " + post_author_id

    post_replies_to = request.GET.get('post_replies_to')
    if post_replies_to:
        post_replies_to = get_database_id("AgentProfile", post_replies_to)
        assert AgentProfile.get(post_replies_to), \
            "Unable to find agent profile with id " + post_replies_to

    posted_after_date = request.GET.get('posted_after_date')
    posted_before_date = request.GET.get('posted_before_date')
    message_classifiers = request.GET.getall('classifier')

    PostClass = SynthesisPost if only_synthesis == "true" else Post
    if order == 'score':
        posts = discussion.db.query(
            PostClass, Content.body_text_index.score_name)
    else:
        posts = discussion.db.query(PostClass)

    posts = posts.filter(PostClass.discussion_id == discussion_id,)
    ##no_of_posts_to_discussion = posts.count()

    post_data = []

    # True means deleted only, False (default) means non-deleted only.
    # None means both.

    # v0
    # deleted = request.GET.get('deleted', None)
    # end v0

    # v1: we would like something like that
    # deleted = request.GET.get('deleted', None)
    # if deleted is None:
    #     if view_def == 'id_only':
    #         deleted = None
    #     else:
    #         deleted = False
    # end v1

    # v2
    # deleted = request.GET.get('deleted', None)
    # if deleted is None:
    #     if not ids:
    #         deleted = False
    #     else:
    #         deleted = None
    #
    # if deleted == 'false':
    #     deleted = False
    #     posts = posts.filter(PostClass.tombstone_condition())
    # elif deleted == 'true':
    #     deleted = True
    #     posts = posts.filter(PostClass.not_tombstone_condition())
    # elif deleted == 'any':
    #     deleted = None
    #     # result will contain deleted and non-deleted posts
    #     pass
    # end v2

    # v3
    # deleted = request.GET.get('deleted', None)
    # if deleted is None:
    #     if not ids:
    #         deleted = False
    #     else:
    #         deleted = None
    # if deleted == 'true':
    #     deleted = True
    #     posts = posts.filter(PostClass.not_tombstone_condition())
    # end v3

    # v4
    deleted = request.GET.get('deleted', None)
    if deleted is None:
        if not ids:
            deleted = False
        else:
            deleted = None
    elif deleted.lower() == "any":
        deleted = None
    else:
        deleted = asbool(deleted)
    # if deleted is not in (False, True, None):
    #     deleted = False
    # end v4

    only_orphan = asbool(request.GET.get('only_orphan', False))
    if only_orphan:
        if root_idea_id:
            raise HTTPBadRequest(localizer.translate(
                _("Getting orphan posts of a specific idea isn't supported.")))
        orphans = Idea._get_orphan_posts_statement(
            discussion_id, True, include_deleted=deleted).subquery("orphans")
        posts = posts.join(orphans, PostClass.id == orphans.c.post_id)

    if root_idea_id:
        related = Idea.get_related_posts_query_c(
            discussion_id, root_idea_id, True, include_deleted=deleted)
        posts = posts.join(related, PostClass.id == related.c.post_id)
    elif not only_orphan:
        if deleted is not None:
            if deleted:
                posts = posts.filter(
                    PostClass.publication_state.in_(
                        deleted_publication_states))
            else:
                posts = posts.filter(PostClass.tombstone_date == None)

    if root_post_id:
        root_post = Post.get(root_post_id)
        posts = posts.filter(
            (Post.ancestry.like(
                root_post.ancestry + cast(root_post.id, String) + ',%')) |
            (PostClass.id == root_post.id))
    elif family_post_id:
        root_post = Post.get(family_post_id)
        ancestor_ids = root_post.ancestor_ids()
        posts = posts.filter(
            (Post.ancestry.like(
                root_post.ancestry + cast(root_post.id, String) + ',%')) |
            (PostClass.id == root_post.id) |
            (PostClass.id.in_(ancestor_ids)))
    else:
        root_post = None

    if ids:
        posts = posts.filter(Post.id.in_(ids))

    if posted_after_date:
        posted_after_date = parse_datetime(posted_after_date)
        if posted_after_date:
            posts = posts.filter(
                PostClass.creation_date >= posted_after_date)
        # Maybe we should do something if the date is invalid. benoitg

    if posted_before_date:
        posted_before_date = parse_datetime(posted_before_date)
        if posted_before_date:
            posts = posts.filter(
                PostClass.creation_date <= posted_before_date)
        # Maybe we should do something if the date is invalid. benoitg

    if post_author_id:
        posts = posts.filter(PostClass.creator_id == post_author_id)

    if message_classifiers:
        if any([len(classifier) == 0 for classifier in message_classifiers]):
            return {'total': 0, 'posts': []}
        polarities = [classifier[0] != "!"
                      for classifier in message_classifiers]
        polarity = all(polarities)
        if not polarity:
            message_classifiers = [c.strip("!") for c in message_classifiers]
        if polarity != any(polarities):
            raise HTTPBadRequest(
                _("Do not combine negative and positive classifiers"))
        # Treat null as no classifier
        includes_null = 'null' in message_classifiers
        if includes_null:
            message_classifiers_nonull = filter(
                lambda c: c != "null", message_classifiers)
        if polarity:
            if len(message_classifiers) == 1:
                term = PostClass.message_classifier == (
                    None if includes_null else message_classifiers[0])
            else:
                term = PostClass.message_classifier.in_(
                    message_classifiers_nonull)
                if includes_null:
                    term = term | (PostClass.message_classifier == None)
        else:
            if len(message_classifiers) == 1:
                term = PostClass.message_classifier != (
                    None if includes_null else message_classifiers[0])
            else:
                term = PostClass.message_classifier.notin_(
                    message_classifiers_nonull)
            if not includes_null:
                term = term | (PostClass.message_classifier == None)
        posts = posts.filter(term)

    if post_replies_to:
        parent_alias = aliased(PostClass)
        posts = posts.join(parent_alias, PostClass.parent)
        posts = posts.filter(parent_alias.creator_id == post_replies_to)

    # Post read/unread management
    is_unread = request.GET.get('is_unread')
    translations = None
    if user_id != Everyone:
        # This is horrible, but the join creates complex subqueries that
        # virtuoso cannot decode properly.
        read_posts = {v.post_id for v in discussion.db.query(
            ViewPost).filter(
            ViewPost.tombstone_condition(),
            ViewPost.actor_id == user_id,
            *ViewPost.get_discussion_conditions(discussion_id))}
        my_sentiments = {l.post_id: l for l in discussion.db.query(
            SentimentOfPost).filter(
            SentimentOfPost.tombstone_condition(),
            SentimentOfPost.actor_id == user_id,
            *SentimentOfPost.get_discussion_conditions(discussion_id))}
        if is_unread != None:
            posts = posts.outerjoin(
                ViewPost, and_(
                    ViewPost.actor_id == user_id,
                    ViewPost.post_id == PostClass.id,
                    ViewPost.tombstone_date == None))
            if is_unread == "true":
                posts = posts.filter(ViewPost.id == None)
            elif is_unread == "false":
                posts = posts.filter(ViewPost.id != None)
        user = AgentProfile.get(user_id)
        service = discussion.translation_service()
        if service:
            translations = PrefCollectionTranslationTable(
                service, LanguagePreferenceCollection.getCurrent(request))
    else:
        # If there is no user_id, all posts are always unread
        my_sentiments = {}
        if is_unread == "false":
            raise HTTPBadRequest(localizer.translate(
                _("You must be logged in to view which posts are read")))

    if text_search is not None:
        # another Virtuoso bug: offband kills score. but it helps speed.
        offband = () if (order == 'score') else None
        posts = posts.filter(Post.body_text_index.contains(
            text_search.encode('utf-8'), offband=offband))

    # posts = posts.options(contains_eager(Post.source))
    # Horrible hack... But useful for structure load
    if view_def == 'id_only':
        pass  # posts = posts.options(defer(Post.body))
    else:
        ideaContentLinkQuery = posts.with_entities(
            PostClass.id, PostClass.idea_content_links_above_post)
        ideaContentLinkCache = dict(ideaContentLinkQuery.all())
        # Note: we could count the likes the same way and kill the subquery.
        # But it interferes with the popularity order,
        # and the benefit is not that high.
        sentiment_counts = discussion.db.query(
            PostClass.id, SentimentOfPost.type, count(SentimentOfPost.id)
        ).join(SentimentOfPost).filter(
            PostClass.id.in_(posts.with_entities(PostClass.id).subquery()),
            SentimentOfPost.tombstone_condition()
        ).group_by(PostClass.id, SentimentOfPost.type)
        sentiment_counts_by_post_id = defaultdict(dict)
        for (post_id, sentiment_type, sentiment_count) in sentiment_counts:
            sentiment_counts_by_post_id[post_id][
                sentiment_type[SentimentOfPost.TYPE_PREFIX_LEN:]
            ] = sentiment_count
        posts = posts.options(
            # undefer(Post.idea_content_links_above_post),
            joinedload_all(Post.creator),
            joinedload_all(Post.extracts),
            joinedload_all(Post.widget_idea_links),
            joinedload_all(SynthesisPost.publishes_synthesis),
            subqueryload_all(Post.attachments))
        if len(discussion.discussion_locales) > 1:
            posts = posts.options(*Content.subqueryload_options())
        else:
            posts = posts.options(*Content.joinedload_options())

    if order == 'chronological':
        posts = posts.order_by(Content.creation_date)
    elif order == 'reverse_chronological':
        posts = posts.order_by(Content.creation_date.desc())
    elif order == 'score':
        posts = posts.order_by(Content.body_text_index.score_name.desc())
    elif order == 'popularity':
        # assume reverse chronological otherwise
        posts = posts.order_by(
            Content.disagree_count - Content.like_count,
            Content.creation_date.desc())
    else:
        posts = posts.order_by(Content.id)
    # print str(posts)

    no_of_posts = 0
    no_of_posts_viewed_by_user = 0

    if deleted is True:
        # We just got deleted posts, now we want their ancestors for context
        post_ids = set()
        ancestor_ids = set()

        def add_ancestors(post):
            post_ids.add(post.id)
            ancestor_ids.update(
                [int(x) for x in post.ancestry.strip(",").split(",") if x])
        posts = list(posts)
        for post in posts:
            add_ancestors(post)
        ancestor_ids -= post_ids
        if ancestor_ids:
            ancestors = discussion.db.query(PostClass).filter(
                PostClass.id.in_(ancestor_ids))
            if view_def == 'id_only':
                pass  # ancestors = ancestors.options(defer(Post.body))
            else:
                ancestors = ancestors.options(
                    # undefer(Post.idea_content_links_above_post),
                    joinedload_all(Post.creator),
                    joinedload_all(Post.extracts),
                    joinedload_all(Post.widget_idea_links),
                    joinedload_all(SynthesisPost.publishes_synthesis),
                    subqueryload_all(Post.attachments))
                if len(discussion.discussion_locales) > 1:
                    ancestors = ancestors.options(
                        *Content.subqueryload_options())
                else:
                    ancestors = ancestors.options(
                        *Content.joinedload_options())
            posts.extend(ancestors.all())

    for query_result in posts:
        score, viewpost = None, None
        if not isinstance(query_result, (list, tuple)):
            query_result = [query_result]
        post = query_result[0]
        if deleted is True:
            add_ancestors(post)

        if user_id != Everyone:
            viewpost = post.id in read_posts
            if view_def != "id_only":
                translate_content(
                    post, translation_table=translations, service=service)
        no_of_posts += 1
        serializable_post = post.generic_json(
            view_def, user_id, permissions) or {}
        if order == 'score':
            score = query_result[1]
            serializable_post['score'] = score

        if viewpost:
            serializable_post['read'] = True
            no_of_posts_viewed_by_user += 1
        elif user_id != Everyone and root_post is not None and \
                root_post.id == post.id:
            # Mark post read, we requested it explicitely
            viewed_post = ViewPost(actor_id=user_id, post=root_post)
            discussion.db.add(viewed_post)
            serializable_post['read'] = True
        else:
            serializable_post['read'] = False
        my_sentiment = my_sentiments.get(post.id, None)
        if my_sentiment is not None:
            my_sentiment = my_sentiment.generic_json(
                'default', user_id, permissions)
        serializable_post['my_sentiment'] = my_sentiment
        if view_def != "id_only":
            serializable_post['indirect_idea_content_links'] = (
                post.indirect_idea_content_links_with_cache(
                    ideaContentLinkCache.get(post.id, None)))
            serializable_post['sentiment_counts'] = \
                sentiment_counts_by_post_id[post.id]

        post_data.append(serializable_post)

    # Benoitg: For now, this completely garbles threading without intelligent
    # handling of pagination. Disabling
    # posts = posts.limit(page_size).offset(data['startIndex']-1)
    # This code isn't up to date. If limiting the query by page, we need to
    # calculate the counts with a separate query to have the right number of
    # results
    # no_of_messages_viewed_by_user = discussion.db.query(ViewPost).join(
    #     Post
    # ).filter(
    #     Post.discussion_id == discussion_id,
    #     ViewPost.actor_id == user_id,
    # ).count() if user_id else 0

    data = {}
    data["page"] = page
    data["unread"] = no_of_posts - no_of_posts_viewed_by_user
    data["total"] = no_of_posts
    data["maxPage"] = max(1, ceil(float(data["total"]) / page_size))
    # TODO: Check if we want 1 based index in the api
    data["startIndex"] = (page_size * page) - (page_size - 1)

    if data["page"] == data["maxPage"]:
        data["endIndex"] = data["total"]
    else:
        data["endIndex"] = data["startIndex"] + (page_size - 1)
    data["posts"] = post_data

    return data
def save_idea(request):
    discussion_id = request.matchdict['discussion_id']
    idea_id = request.matchdict['id']
    idea_data = json.loads(request.body)
    # Idea.db.execute('set transaction isolation level read committed')
    # Special items in TOC, like unsorted posts.
    if idea_id in ['orphan_posts']:
        return {'ok': False, 'id': Idea.uri_generic(idea_id)}

    idea = Idea.get_instance(idea_id)
    if not idea:
        raise HTTPNotFound("No such idea: %s" % (idea_id))
    if isinstance(idea, RootIdea):
        raise HTTPBadRequest("Cannot edit root idea.")
    discussion = Discussion.get(id=int(discussion_id))
    if not discussion:
        raise HTTPNotFound(
            "Discussion with id '%s' not found." % discussion_id)
    if idea.discussion_id != discussion.id:
        raise HTTPBadRequest(
            "Idea from discussion %s cannot be saved from a different "
            "discussion (%s)." % (idea.discussion_id, discussion.id))
    idea.short_title = idea_data['shortTitle']
    idea.long_title = idea_data['longTitle']

    if 'parentId' in idea_data and idea_data['parentId'] is not None:
        # TODO: Make sure this is sent as a list!
        parent = Idea.get_instance(idea_data['parentId'])
        # Check existence before dereferencing the parent.
        if not parent:
            raise HTTPNotFound(
                "Missing parentId %s" % (idea_data['parentId']))
        # calculate it early to maximize contention.
        ancestors = parent.get_all_ancestors()
        order = idea_data.get('order', 0.0)

        current_parent = None
        for parent_link in idea.source_links:
            pl_ancestors = parent_link.source.get_all_ancestors()
            if parent_link.source != parent:
                parent_link.is_tombstone = True
            else:
                parent_link.order = order
                current_parent = parent_link
            Idea.db.expire(parent_link.source, ['target_links'])
            parent_link.source.send_to_changes()
            for ancestor in pl_ancestors:
                ancestor.send_to_changes()
        if current_parent is None:
            link = IdeaLink(source=parent, target=idea, order=order)
            idea.source_links.append(link)
        Idea.db.expire(parent, ['target_links'])
        parent.send_to_changes()
        for ancestor in ancestors:
            ancestor.send_to_changes()
        Idea.db.expire(idea, ['source_links'])

    next_synthesis = discussion.get_next_synthesis()
    if idea_data['inNextSynthesis']:
        if idea not in next_synthesis.ideas:
            next_synthesis.ideas.append(idea)
    else:
        if idea in next_synthesis.ideas:
            next_synthesis.ideas.remove(idea)

    idea.send_to_changes()

    return {'ok': True, 'id': idea.uri()}
def get_posts(request):
    """
    Query interface on posts
    Filters have two forms:
    only_*, is for filters that cannot be reversed (ex: only_synthesis)
    is_*, is for filters that can be reversed (ex: is_unread=true returns
    only unread messages, is_unread=false returns only read messages)
    order can be chronological, reverse_chronological
    """
    localizer = request.localizer
    discussion_id = int(request.matchdict['discussion_id'])
    discussion = Discussion.get(int(discussion_id))
    if not discussion:
        raise HTTPNotFound(localizer.translate(
            _("No discussion found with id=%s")) % discussion_id)

    discussion.import_from_sources()

    user_id = authenticated_userid(request) or Everyone
    permissions = get_permissions(user_id, discussion_id)

    DEFAULT_PAGE_SIZE = 25
    page_size = DEFAULT_PAGE_SIZE

    filter_names = [
        filter_name for filter_name
        in request.GET.getone('filters').split(',')
        if filter_name
    ] if request.GET.get('filters') else []

    try:
        page = int(request.GET.getone('page'))
    except (ValueError, KeyError):
        page = 1

    text_search = request.GET.get('text_search', None)

    order = request.GET.get('order')
    if order is None:
        order = 'chronological'
    assert order in ('chronological', 'reverse_chronological', 'score')
    if order == 'score':
        assert text_search is not None

    if page < 1:
        page = 1

    root_post_id = request.GET.getall('root_post_id')
    if root_post_id:
        root_post_id = get_database_id("Post", root_post_id[0])
    family_post_id = request.GET.getall('family_post_id')
    if family_post_id:
        family_post_id = get_database_id("Post", family_post_id[0])

    root_idea_id = request.GET.getall('root_idea_id')
    if root_idea_id:
        root_idea_id = get_database_id("Idea", root_idea_id[0])

    ids = request.GET.getall('ids[]')
    if ids:
        ids = [get_database_id("Post", id) for id in ids]

    view_def = request.GET.get('view') or 'default'

    only_synthesis = request.GET.get('only_synthesis')

    post_author_id = request.GET.get('post_author')
    if post_author_id:
        post_author_id = get_database_id("AgentProfile", post_author_id)
        assert AgentProfile.get(post_author_id), \
            "Unable to find agent profile with id " + post_author_id

    post_replies_to = request.GET.get('post_replies_to')
    if post_replies_to:
        post_replies_to = get_database_id("AgentProfile", post_replies_to)
        assert AgentProfile.get(post_replies_to), \
            "Unable to find agent profile with id " + post_replies_to

    posted_after_date = request.GET.get('posted_after_date')
    posted_before_date = request.GET.get('posted_before_date')

    PostClass = SynthesisPost if only_synthesis == "true" else Post
    ideaContentLinkQuery = discussion.db.query(
        PostClass.id, PostClass.idea_content_links_above_post)
    if order == 'score':
        posts = discussion.db.query(
            PostClass, Content.body_text_index.score_name)
    else:
        posts = discussion.db.query(PostClass)

    posts = posts.filter(PostClass.discussion_id == discussion_id,)
    ideaContentLinkQuery = ideaContentLinkQuery.filter(
        PostClass.discussion_id == discussion_id)
    ##no_of_posts_to_discussion = posts.count()

    post_data = []

    only_orphan = request.GET.get('only_orphan')
    if only_orphan == "true":
        if root_idea_id:
            raise HTTPBadRequest(localizer.translate(
                _("Getting orphan posts of a specific idea isn't supported.")))
        orphans = text(Idea._get_orphan_posts_statement(),
                       bindparams=[bindparam('discussion_id', discussion_id)]
                       ).columns(column('post_id')).alias('orphans')
        posts = posts.join(orphans, PostClass.id == orphans.c.post_id)
        ideaContentLinkQuery = ideaContentLinkQuery.join(
            orphans, PostClass.id == orphans.c.post_id)
    elif only_orphan == "false":
        raise HTTPBadRequest(localizer.translate(
            _("Getting non-orphan posts isn't supported.")))

    # "true" means hidden only, "false" (default) means visible only.
    # "any" means both.
    hidden = request.GET.get('hidden_messages', "false")
    if hidden != 'any':
        posts = posts.filter(PostClass.hidden == asbool(hidden))
        ideaContentLinkQuery = ideaContentLinkQuery.filter(
            PostClass.hidden == asbool(hidden))

    if root_idea_id:
        related = text(Idea._get_related_posts_statement(),
                       bindparams=[bindparam('root_idea_id', root_idea_id),
                                   bindparam('discussion_id', discussion_id)]
                       ).columns(column('post_id')).alias('related')
        # Virtuoso bug: This should work...
        # posts = posts.join(related, PostClass.id==related.c.post_id)
        posts = posts.join(related, PostClass.id == related.c.post_id)
        ideaContentLinkQuery = ideaContentLinkQuery.join(
            related, PostClass.id == related.c.post_id)
    if root_post_id:
        root_post = Post.get(root_post_id)

        posts = posts.filter(
            (Post.ancestry.like(
                root_post.ancestry + cast(root_post.id, String) + ',%')) |
            (PostClass.id == root_post.id))
    elif family_post_id:
        root_post = Post.get(family_post_id)
        ancestor_ids = root_post.ancestor_ids()
        posts = posts.filter(
            (Post.ancestry.like(
                root_post.ancestry + cast(root_post.id, String) + ',%')) |
            (PostClass.id == root_post.id) |
            (PostClass.id.in_(ancestor_ids)))
        ideaContentLinkQuery = ideaContentLinkQuery.filter(
            (Post.ancestry.like(
                root_post.ancestry + cast(root_post.id, String) + ',%')) |
            (PostClass.id == root_post.id) |
            (PostClass.id.in_(ancestor_ids)))
    else:
        root_post = None

    if ids:
        posts = posts.filter(Post.id.in_(ids))
        ideaContentLinkQuery = ideaContentLinkQuery.filter(Post.id.in_(ids))

    if posted_after_date:
        posted_after_date = parse_datetime(posted_after_date)
        if posted_after_date:
            posts = posts.filter(
                PostClass.creation_date >= posted_after_date)
            ideaContentLinkQuery = ideaContentLinkQuery.filter(
                PostClass.creation_date >= posted_after_date)
        # Maybe we should do something if the date is invalid. benoitg

    if posted_before_date:
        posted_before_date = parse_datetime(posted_before_date)
        if posted_before_date:
            posts = posts.filter(
                PostClass.creation_date <= posted_before_date)
            # Fixed: the original had posts/ideaContentLinkQuery swapped here.
            ideaContentLinkQuery = ideaContentLinkQuery.filter(
                PostClass.creation_date <= posted_before_date)
        # Maybe we should do something if the date is invalid. benoitg

    if post_author_id:
        posts = posts.filter(PostClass.creator_id == post_author_id)
        ideaContentLinkQuery = ideaContentLinkQuery.filter(
            PostClass.creator_id == post_author_id)

    if post_replies_to:
        parent_alias = aliased(PostClass)
        posts = posts.join(parent_alias, PostClass.parent)
        posts = posts.filter(parent_alias.creator_id == post_replies_to)
        ideaContentLinkQuery = ideaContentLinkQuery.join(
            parent_alias, PostClass.parent)
        ideaContentLinkQuery = ideaContentLinkQuery.filter(
            parent_alias.creator_id == post_replies_to)

    # Post read/unread management
    is_unread = request.GET.get('is_unread')
    translations = None
    if user_id != Everyone:
        # This is horrible, but the join creates complex subqueries that
        # virtuoso cannot decode properly.
        read_posts = {v.post_id for v in discussion.db.query(
            ViewPost).filter(
            ViewPost.tombstone_condition(),
            ViewPost.actor_id == user_id,
            *ViewPost.get_discussion_conditions(discussion_id))}
        liked_posts = {l.post_id: l.id for l in discussion.db.query(
            LikedPost).filter(
            LikedPost.tombstone_condition(),
            LikedPost.actor_id == user_id,
            *LikedPost.get_discussion_conditions(discussion_id))}
        if is_unread != None:
            posts = posts.outerjoin(
                ViewPost, and_(
                    ViewPost.actor_id == user_id,
                    ViewPost.post_id == PostClass.id,
                    ViewPost.tombstone_date == None))
            if is_unread == "true":
                posts = posts.filter(ViewPost.id == None)
            elif is_unread == "false":
                posts = posts.filter(ViewPost.id != None)
        user = AgentProfile.get(user_id)
        service = discussion.translation_service()
        if service:
            translations = user_pref_as_translation_table(user, service)
    else:
        # If there is no user_id, all posts are always unread
        if is_unread == "false":
            raise HTTPBadRequest(localizer.translate(
                _("You must be logged in to view which posts are read")))

    if text_search is not None:
        # another Virtuoso bug: offband kills score. but it helps speed.
        offband = () if (order == 'score') else None
        posts = posts.filter(Post.body_text_index.contains(
            text_search.encode('utf-8'), offband=offband))
        ideaContentLinkQuery = ideaContentLinkQuery.filter(
            Post.body_text_index.contains(
                text_search.encode('utf-8'), offband=offband))

    # posts = posts.options(contains_eager(Post.source))
    # Horrible hack... But useful for structure load
    if view_def == 'id_only':
        pass  # posts = posts.options(defer(Post.body))
    else:
        posts = posts.options(
            # undefer(Post.idea_content_links_above_post),
            joinedload_all(Post.creator),
            joinedload_all(Post.extracts),
            joinedload_all(Post.widget_idea_links),
            joinedload_all(SynthesisPost.publishes_synthesis),
            subqueryload_all(Post.attachments))
        if len(discussion.discussion_locales) > 1:
            posts = posts.options(*Content.subqueryload_options())
        else:
            posts = posts.options(*Content.joinedload_options())
    ideaContentLinkCache = dict(ideaContentLinkQuery.all())

    if order == 'chronological':
        posts = posts.order_by(Content.creation_date)
    elif order == 'reverse_chronological':
        posts = posts.order_by(Content.creation_date.desc())
    elif order == 'score':
        posts = posts.order_by(Content.body_text_index.score_name.desc())
    else:
        posts = posts.order_by(Content.id)
    print str(posts)

    no_of_posts = 0
    no_of_posts_viewed_by_user = 0

    for query_result in posts:
        score, viewpost, likedpost = None, None, None
        if not isinstance(query_result, (list, tuple)):
            query_result = [query_result]
        post = query_result[0]
        if user_id != Everyone:
            viewpost = post.id in read_posts
            likedpost = liked_posts.get(post.id, None)
            if view_def != "id_only":
                translate_content(
                    post, translation_table=translations, service=service)
        no_of_posts += 1
        serializable_post = post.generic_json(
            view_def, user_id, permissions) or {}
        if order == 'score':
            score = query_result[1]
            serializable_post['score'] = score

        if viewpost:
            serializable_post['read'] = True
            no_of_posts_viewed_by_user += 1
        elif user_id != Everyone and root_post is not None and \
                root_post.id == post.id:
            # Mark post read, we requested it explicitely
            viewed_post = ViewPost(
                actor_id=user_id,
                post=root_post)
            discussion.db.add(viewed_post)
            serializable_post['read'] = True
        else:
            serializable_post['read'] = False
        # serializable_post['liked'] = likedpost.uri() if likedpost else False
        serializable_post['liked'] = (
            LikedPost.uri_generic(likedpost) if likedpost else False)
        if view_def != "id_only":
            serializable_post['indirect_idea_content_links'] = (
                post.indirect_idea_content_links_with_cache(
                    ideaContentLinkCache.get(post.id, None)))

        post_data.append(serializable_post)

    # Benoitg: For now, this completely garbles threading without intelligent
    # handling of pagination. Disabling
    # posts = posts.limit(page_size).offset(data['startIndex']-1)
    # This code isn't up to date. If limiting the query by page, we need to
    # calculate the counts with a separate query to have the right number of
    # results
    # no_of_messages_viewed_by_user = discussion.db.query(ViewPost).join(
    #     Post
    # ).filter(
    #     Post.discussion_id == discussion_id,
    #     ViewPost.actor_id == user_id,
    # ).count() if user_id else 0

    data = {}
    data["page"] = page
    data["unread"] = no_of_posts - no_of_posts_viewed_by_user
    data["total"] = no_of_posts
    data["maxPage"] = max(1, ceil(float(data["total"]) / page_size))
    # TODO: Check if we want 1 based index in the api
    data["startIndex"] = (page_size * page) - (page_size - 1)

    if data["page"] == data["maxPage"]:
        data["endIndex"] = data["total"]
    else:
        data["endIndex"] = data["startIndex"] + (page_size - 1)
    data["posts"] = post_data

    return data
def __getitem__(self, key):
    from assembl.models import Discussion
    discussion = Discussion.get(int(key))
    if not discussion:
        raise KeyError()
    return discussion
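# Traversal sketch: this __getitem__ lets Pyramid resolve a URL segment by
# integer discussion id; a missing discussion surfaces as KeyError, which
# Pyramid turns into a 404. Hypothetical direct use (container class name
# assumed for illustration):
# container = DiscussionsContext()
# discussion = container['6']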
def get_posts(request):
    """
    Query interface on posts
    Filters have two forms:
    only_*, is for filters that cannot be reversed (ex: only_synthesis)
    is_*, is for filters that can be reversed (ex: is_unread=true returns
    only unread messages, is_unread=false returns only read messages)
    order can be chronological, reverse_chronological
    """
    localizer = get_localizer(request)
    discussion_id = int(request.matchdict["discussion_id"])
    discussion = Discussion.get(id=int(discussion_id))
    if not discussion:
        raise HTTPNotFound(localizer.translate(
            _("No discussion found with id=%s")) % discussion_id)
    discussion.import_from_sources()
    user_id = authenticated_userid(request)
    DEFAULT_PAGE_SIZE = 25
    page_size = DEFAULT_PAGE_SIZE
    filter_names = (
        [filter_name for filter_name
         in request.GET.getone("filters").split(",")
         if filter_name]
        if request.GET.get("filters") else [])
    try:
        page = int(request.GET.getone("page"))
    except (ValueError, KeyError):
        page = 1
    order = request.GET.get("order")
    if order is None:
        order = "chronological"
    assert order in ("chronological", "reverse_chronological")
    if page < 1:
        page = 1
    root_post_id = request.GET.getall("root_post_id")
    if root_post_id:
        root_post_id = get_database_id("Post", root_post_id[0])
    root_idea_id = request.GET.getall("root_idea_id")
    if root_idea_id:
        root_idea_id = get_database_id("Idea", root_idea_id[0])
    ids = request.GET.getall("ids")
    if ids:
        ids = [get_database_id("Post", id) for id in ids]
    view_def = request.GET.get("view")
    only_synthesis = request.GET.get("only_synthesis")
    if only_synthesis == "true":
        posts = Post.db.query(SynthesisPost)
    else:
        posts = Post.db.query(Post)
    posts = posts.filter(Post.discussion_id == discussion_id)
    ##no_of_posts_to_discussion = posts.count()
    post_data = []
    only_orphan = request.GET.get("only_orphan")
    if only_orphan == "true":
        if root_idea_id:
            raise HTTPBadRequest(localizer.translate(
                _("Getting orphan posts of a specific idea isn't supported.")))
        posts = posts.filter(Post.id.in_(text(
            Idea._get_orphan_posts_statement(),
            bindparams=[bindparam("discussion_id", discussion_id)])))
    elif only_orphan == "false":
        raise HTTPBadRequest(localizer.translate(
            _("Getting non-orphan posts isn't supported.")))
    if root_idea_id:
        posts = posts.filter(Post.id.in_(text(
            Idea._get_related_posts_statement(),
            bindparams=[bindparam("root_idea_id", root_idea_id),
                        bindparam("discussion_id", discussion_id)])))
    if root_post_id:
        root_post = Post.get(id=root_post_id)
        posts = posts.filter(
            (Post.ancestry.like(
                root_post.ancestry + cast(root_post.id, String) + ",%"))
            | (Post.id == root_post.id))
    else:
        root_post = None
    if ids:
        posts = posts.filter(Post.id.in_(ids))
    # Post read/unread management
    is_unread = request.GET.get("is_unread")
    print "\n" + repr(is_unread) + "\n"
    if user_id:
        posts = posts.outerjoin(
            ViewPost, and_(ViewPost.actor_id == user_id,
                           ViewPost.post_id == Post.id))
        posts = posts.add_entity(ViewPost)
        if is_unread == "true":
            posts = posts.filter(ViewPost.id == None)
        elif is_unread == "false":
            posts = posts.filter(ViewPost.id != None)
    else:
        # If there is no user_id, all posts are always unread
        if is_unread == "false":
            raise HTTPBadRequest(localizer.translate(
                _("You must be logged in to view which posts are read")))
    # posts = posts.options(contains_eager(Post.source))
    posts = posts.options(joinedload_all(Post.creator))
    if order == "chronological":
        posts = posts.order_by(Content.creation_date)
    elif order == "reverse_chronological":
        posts = posts.order_by(Content.creation_date.desc())
    no_of_posts = 0
    no_of_posts_viewed_by_user = 0
    for query_result in posts:
        if user_id:
            post, viewpost = query_result
        else:
            post, viewpost = query_result, None
        no_of_posts += 1
        if view_def:
            serializable_post = post.generic_json(view_def)
        else:
            serializable_post = post.serializable()
        if viewpost:
            serializable_post["read"] = True
            no_of_posts_viewed_by_user += 1
        elif user_id and root_post is not None and root_post.id == post.id:
            # Mark post read, we requested it explicitly
            viewed_post = ViewPost(actor_id=user_id, post=root_post)
            Post.db.add(viewed_post)
            serializable_post["read"] = True
        else:
            serializable_post["read"] = False
        post_data.append(serializable_post)
    # Benoitg: For now, this completely garbles threading without intelligent
    # handling of pagination. Disabling
    # posts = posts.limit(page_size).offset(data['startIndex']-1)
    # This code isn't up to date. If limiting the query by page, we need to
    # calculate the counts with a separate query to have the right number of
    # results
    # no_of_messages_viewed_by_user = Post.db.query(ViewPost).join(
    #     Post
    # ).filter(
    #     Post.discussion_id == discussion_id,
    #     ViewPost.actor_id == user_id,
    # ).count() if user_id else 0
    data = {}
    data["page"] = page
    data["unread"] = no_of_posts - no_of_posts_viewed_by_user
    data["total"] = no_of_posts
    data["maxPage"] = max(1, ceil(float(data["total"]) / page_size))
    # TODO: Check if we want 1 based index in the api
    data["startIndex"] = (page_size * page) - (page_size - 1)
    if data["page"] == data["maxPage"]:
        data["endIndex"] = data["total"]
    else:
        data["endIndex"] = data["startIndex"] + (page_size - 1)
    data["posts"] = post_data
    return data
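A hedged sketch of calling this view over HTTP. The /api/v1/discussion/{id}/posts path and the session cookie are assumptions; the query parameters are the ones the view reads from request.GET above.

# Hypothetical client call; route path and auth cookie are assumptions.
import requests

resp = requests.get(
    "http://localhost:6543/api/v1/discussion/1/posts",
    params={
        "order": "reverse_chronological",
        "is_unread": "true",              # requires an authenticated user
        "root_post_id": "local:Post/42",  # restrict to one post's subtree
        "page": 1,
    },
    cookies=session_cookies)              # assumed authenticated session
data = resp.json()
print data["total"], data["unread"], len(data["posts"])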
def clone_discussion(
        from_session, discussion_id, to_session=None, new_slug=None):
    from assembl.models import DiscussionBoundBase, Discussion, Post
    discussion = Discussion.get(discussion_id)
    prefetch(from_session, discussion_id)
    changes = defaultdict(dict)
    if to_session is None:
        to_session = from_session
        changes[discussion]['slug'] = new_slug or (discussion.slug + "_copy")
    else:
        changes[discussion]['slug'] = new_slug or discussion.slug
    copies_of = {}
    copies = set()
    in_process = set()
    promises = defaultdict(list)

    def resolve_promises(ob, copy):
        if ob in promises:
            for (o, reln) in promises[ob]:
                print 'fulfilling', o.__class__, o.id
                assign_ob(o, reln, copy)
            del promises[ob]

    def recursive_clone(ob, path):
        if ob in copies_of:
            return copies_of[ob]
        if ob in copies:
            return ob
        if ob in in_process:
            print "in process", ob.__class__, ob.id
            return None
        if isinstance(ob, tuple(get_special_classes().keys())):
            if from_session == to_session:
                copy = ob
            else:
                copy = get_special_classes()[ob.__class__](ob)
            copies_of[ob] = copy
            return copy
        if isinstance(ob, DiscussionBoundBase):
            assert discussion_id == ob.get_discussion_id()
        print "recursive_clone", print_path(path)
        mapper = class_mapper(ob.__class__)
        (direct_reln, copy_col_props, nullable_relns, non_nullable_reln
         ) = get_mapper_info(mapper)
        values = {r.key: getattr(ob, r.key, None) for r in copy_col_props}
        print "->", ob.__class__, ob.id
        in_process.add(ob)
        for r in non_nullable_reln:
            subob = getattr(ob, r.key, None)
            if subob is None:
                from assembl.models import Idea, IdeaLink
                # Those might be None because the underlying idea is tombstoned
                if isinstance(ob, IdeaLink):
                    subob_id = None
                    if r.key == 'source':
                        subob_id = ob.source_id
                    elif r.key == 'target':
                        subob_id = ob.target_id
                    if subob_id:
                        subob = ob.db.query(Idea).get(subob_id)
            assert subob is not None
            assert subob not in in_process
            print 'recurse ^0', r.key
            result = recursive_clone(subob, path + [(r.key, subob)])
            assert result is not None
            assert result.id
            print 'result', result.__class__, result.id
            assign_dict(values, r, result)
        local_promises = {}
        for r in nullable_relns:
            subob = getattr(ob, r.key, None)
            if subob is not None:
                if subob in copies_of:
                    assign_dict(values, r, copies_of[subob])
                else:
                    local_promises[r] = subob
        values.update(changes[ob])
        if isinstance(ob, Discussion):
            values['table_of_contents'] = None
            values['root_idea'] = None
            values['next_synthesis'] = None
        copy = ob.__class__(**values)
        to_session.add(copy)
        to_session.flush()
        print "<-", ob.__class__, ob.id, copy.id
        copies_of[ob] = copy
        copies.add(copy)
        in_process.remove(ob)
        resolve_promises(ob, copy)
        for reln, subob in local_promises.items():
            if subob in in_process:
                promises[subob].append((copy, reln))
            else:
                print 'recurse 0', reln.key
                result = recursive_clone(subob, path + [(reln.key, subob)])
                if result is None:
                    # in process
                    print "promising", subob.__class__, subob.id, reln.key
                    promises[subob].append((copy, reln))
                else:
                    print "delayed", reln.key, result.__class__, result.id
                    assign_ob(copy, reln, result)
        return copy

    treating = set()

    def stage_2_rec_clone(ob, path):
        if ob in treating:
            return
        if isinstance(ob, tuple(get_special_classes().keys())):
            if from_session == to_session:
                copy = ob
            else:
                copy = get_special_classes()[ob.__class__](ob)
            copies_of[ob] = copy
            return copy
        print "stage_2_rec_clone", print_path(path)
        treating.add(ob)
        if ob in copies_of:
            copy = copies_of[ob]
        elif ob in copies:
            copy = ob
        else:
            copy = recursive_clone(ob, path)
        resolve_promises(ob, copy)
        treating.add(copy)
        mapper = class_mapper(ob.__class__)
        (direct_reln, copy_col_props, nullable_relns, non_nullable_reln
         ) = get_mapper_info(mapper)
        for r in mapper.relationships:
            if r in direct_reln:
                continue
            subobs = getattr(ob, r.key)
            if subobs is None:
                continue
            if not isinstance(subobs, list):
                subobs = [subobs]
            for subob in subobs:
                stage_2_rec_clone(subob, path + [(r.key, subob)])

    path = [('', discussion)]
    copy = recursive_clone(discussion, path)
    stage_2_rec_clone(discussion, path)
    to_session.flush()
    for p in to_session.query(Post).filter_by(
            discussion=copy, parent_id=None).all():
        p._set_ancestry('')
    to_session.flush()
    return copy
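A short usage sketch for clone_discussion, assuming a SQLAlchemy session is already configured; the get_session_maker import path is an assumption, while the call signature is the one defined above.

# Hypothetical driver: clone discussion 3 in place under a new slug.
from assembl.lib.sqla import get_session_maker  # import path assumed

session = get_session_maker()()
copy = clone_discussion(session, 3, new_slug="my-discussion-copy")
session.commit()
print copy.id, copy.slug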
def get_posts(request):
    discussion_id = int(request.matchdict['discussion_id'])
    discussion = Discussion.get(id=int(discussion_id))
    if not discussion:
        raise HTTPNotFound(_("No discussion found with id=%s" % discussion_id))
    discussion.import_from_sources()
    user_id = authenticated_userid(request)
    DEFAULT_PAGE_SIZE = 25
    page_size = DEFAULT_PAGE_SIZE
    filter_names = [
        filter_name for filter_name
        in request.GET.getone('filters').split(',')
        if filter_name
    ] if request.GET.get('filters') else []
    try:
        page = int(request.GET.getone('page'))
    except (ValueError, KeyError):
        page = 1
    if page < 1:
        page = 1
    root_post_id = request.GET.getall('root_post_id')
    if root_post_id:
        root_post_id = get_database_id("Post", root_post_id[0])
    root_idea_id = request.GET.getall('root_idea_id')
    if root_idea_id:
        root_idea_id = get_database_id("Idea", root_idea_id[0])
    ids = request.GET.getall('ids')
    if ids:
        ids = [get_database_id("Post", id) for id in ids]
    view_def = request.GET.get('view')
    # Rename "inbox" to "unread", the number of unread messages for the
    # current user.
    no_of_messages_viewed_by_user = Post.db.query(ViewPost).join(
        Post, Content, Source
    ).filter(
        Source.discussion_id == discussion_id,
        Content.source_id == Source.id,
        ViewPost.actor_id == user_id,
    ).count() if user_id else 0
    posts = Post.db.query(Post).join(
        Content, Source,
    ).filter(
        Source.discussion_id == discussion_id,
        Content.source_id == Source.id,
    )
    no_of_posts_to_discussion = posts.count()
    post_data = []
    if root_idea_id:
        if root_idea_id == Idea.ORPHAN_POSTS_IDEA_ID:
            ideas_query = Post.db.query(Post) \
                .filter(Post.id.in_(text(
                    Idea._get_orphan_posts_statement(),
                    bindparams=[bindparam('discussion_id', discussion_id)])))
        else:
            ideas_query = Post.db.query(Post) \
                .filter(Post.id.in_(text(
                    Idea._get_related_posts_statement(),
                    bindparams=[bindparam('root_idea_id', root_idea_id)])))
        posts = ideas_query.join(Content, Source)
    elif root_post_id:
        root_post = Post.get(id=root_post_id)
        posts = posts.filter(
            (Post.ancestry.like(
                root_post.ancestry + cast(root_post.id, String) + ',%'))
            | (Post.id == root_post.id))
        # Benoitg: For now, this completely garbles threading without
        # intelligent handling of pagination. Disabling
        # posts = posts.limit(page_size).offset(data['startIndex']-1)
    elif ids:
        posts = posts.filter(Post.id.in_(ids))
    if user_id:
        posts = posts.outerjoin(
            ViewPost, and_(ViewPost.actor_id == user_id,
                           ViewPost.post_id == Post.id))
        posts = posts.add_entity(ViewPost)
    posts = posts.options(contains_eager(Post.content, Content.source))
    posts = posts.options(joinedload_all(Post.creator, AgentProfile.user))
    posts = posts.order_by(Content.creation_date)
    if 'synthesis' in filter_names:
        posts = posts.filter(Post.is_synthesis == True)
    if user_id:
        for post, viewpost in posts:
            if view_def:
                serializable_post = post.generic_json(view_def)
            else:
                serializable_post = post.serializable()
            if viewpost:
                serializable_post['read'] = True
            else:
                serializable_post['read'] = False
                if root_post_id:
                    viewed_post = ViewPost(
                        actor_id=user_id,
                        post=post
                    )
                    Post.db.add(viewed_post)
            post_data.append(serializable_post)
    else:
        for post in posts:
            if view_def:
                serializable_post = post.generic_json(view_def)
            else:
                serializable_post = post.serializable()
            post_data.append(serializable_post)
    data = {}
    data["page"] = page
    data["inbox"] = no_of_posts_to_discussion - no_of_messages_viewed_by_user
    # What is "total", the total messages in the current context?
    # This gave wrong count, I don't know why. benoitg
    # data["total"] = discussion.posts().count()
    data["total"] = no_of_posts_to_discussion
    data["maxPage"] = max(1, ceil(float(data["total"]) / page_size))
    # TODO: Check if we want 1 based index in the api
    data["startIndex"] = (page_size * page) - (page_size - 1)
    if data["page"] == data["maxPage"]:
        data["endIndex"] = data["total"]
    else:
        data["endIndex"] = data["startIndex"] + (page_size - 1)
    data["posts"] = post_data
    return data
def save_idea(request):
    """Update this idea.

    In case the ``parentId`` is changed, handle all ``IdeaLink`` changes and
    send relevant ideas on the socket."""
    discussion_id = int(request.matchdict['discussion_id'])
    idea_id = request.matchdict['id']
    idea_data = json.loads(request.body)
    # Idea.default_db.execute('set transaction isolation level read committed')
    # Special items in TOC, like unsorted posts.
    if idea_id in ['orphan_posts']:
        return {'ok': False, 'id': Idea.uri_generic(idea_id)}
    idea = Idea.get_instance(idea_id)
    if not idea:
        raise HTTPNotFound("No such idea: %s" % (idea_id))
    if isinstance(idea, RootIdea):
        raise HTTPBadRequest("Cannot edit root idea.")
    discussion = Discussion.get(int(discussion_id))
    if not discussion:
        raise HTTPNotFound(
            "Discussion with id '%s' not found." % discussion_id)
    if idea.discussion_id != discussion.id:
        raise HTTPBadRequest(
            "Idea from discussion %s cannot be saved from a different "
            "discussion (%s)." % (idea.discussion_id, discussion.id))
    simple_fields = {
        'shortTitle': 'short_title',
        'longTitle': 'long_title',
        'definition': 'definition',
        'message_view_override': 'message_view_override',
        'messages_in_parent': 'messages_in_parent',
    }
    for key, attr_name in simple_fields.iteritems():
        if key in idea_data:
            setattr(idea, attr_name, idea_data[key])
    if 'parentId' in idea_data and idea_data['parentId'] is not None:
        # TODO: Make sure this is sent as a list!
        parent = Idea.get_instance(idea_data['parentId'])
        if not parent:
            raise HTTPNotFound(
                "Missing parentId %s" % (idea_data['parentId']))
        # calculate it early to maximize contention.
        prev_ancestors = parent.get_all_ancestors()
        new_ancestors = set()
        order = idea_data.get('order', 0.0)
        for parent_link in idea.source_links:
            # still assuming there's only one.
            pl_parent = parent_link.source
            pl_ancestors = pl_parent.get_all_ancestors()
            new_ancestors.update(pl_ancestors)
            if parent_link.source != parent:
                parent_link.copy(True)
                parent_link.source = parent
                parent.db.expire(parent, ['target_links'])
                parent.db.expire(pl_parent, ['target_links'])
                for ancestor in pl_ancestors:
                    if ancestor in prev_ancestors:
                        break
                    ancestor.send_to_changes()
                for ancestor in prev_ancestors:
                    if ancestor in new_ancestors:
                        break
                    ancestor.send_to_changes()
            parent_link.order = order
            parent_link.db.expire(parent_link.source, ['target_links'])
            parent_link.source.send_to_changes()
            parent_link.db.flush()
    idea.send_to_changes()
    return {'ok': True, 'id': idea.uri()}
def save_idea(request):
    discussion_id = int(request.matchdict['discussion_id'])
    idea_id = request.matchdict['id']
    idea_data = json.loads(request.body)
    # Idea.default_db.execute('set transaction isolation level read committed')
    # Special items in TOC, like unsorted posts.
    if idea_id in ['orphan_posts']:
        return {'ok': False, 'id': Idea.uri_generic(idea_id)}
    idea = Idea.get_instance(idea_id)
    if not idea:
        raise HTTPNotFound("No such idea: %s" % (idea_id))
    if isinstance(idea, RootIdea):
        raise HTTPBadRequest("Cannot edit root idea.")
    discussion = Discussion.get(int(discussion_id))
    if not discussion:
        raise HTTPNotFound(
            "Discussion with id '%s' not found." % discussion_id)
    if idea.discussion_id != discussion.id:
        raise HTTPBadRequest(
            "Idea from discussion %s cannot be saved from a different "
            "discussion (%s)." % (idea.discussion_id, discussion.id))
    if 'shortTitle' in idea_data:
        idea.short_title = idea_data['shortTitle']
    if 'longTitle' in idea_data:
        idea.long_title = idea_data['longTitle']
    if 'definition' in idea_data:
        idea.definition = idea_data['definition']
    if 'parentId' in idea_data and idea_data['parentId'] is not None:
        # TODO: Make sure this is sent as a list!
        parent = Idea.get_instance(idea_data['parentId'])
        if not parent:
            raise HTTPNotFound(
                "Missing parentId %s" % (idea_data['parentId']))
        # calculate it early to maximize contention.
        prev_ancestors = parent.get_all_ancestors()
        new_ancestors = set()
        order = idea_data.get('order', 0.0)
        for parent_link in idea.source_links:
            # still assuming there's only one.
            pl_parent = parent_link.source
            pl_ancestors = pl_parent.get_all_ancestors()
            new_ancestors.update(pl_ancestors)
            if parent_link.source != parent:
                parent_link.copy(True)
                parent_link.source = parent
                parent.db.expire(parent, ['target_links'])
                parent.db.expire(pl_parent, ['target_links'])
                for ancestor in pl_ancestors:
                    if ancestor in prev_ancestors:
                        break
                    ancestor.send_to_changes()
                for ancestor in prev_ancestors:
                    if ancestor in new_ancestors:
                        break
                    ancestor.send_to_changes()
            parent_link.order = order
            parent_link.db.expire(parent_link.source, ['target_links'])
            parent_link.source.send_to_changes()
            parent_link.db.flush()
    idea.is_in_next_synthesis = idea_data.get('inNextSynthesis', False)
    idea.send_to_changes()
    return {'ok': True, 'id': idea.uri()}
def save_idea(request):
    discussion_id = int(request.matchdict['discussion_id'])
    idea_id = request.matchdict['id']
    idea_data = json.loads(request.body)
    # Idea.db.execute('set transaction isolation level read committed')
    # Special items in TOC, like unsorted posts.
    if idea_id in ['orphan_posts']:
        return {'ok': False, 'id': Idea.uri_generic(idea_id)}
    idea = Idea.get_instance(idea_id)
    if not idea:
        raise HTTPNotFound("No such idea: %s" % (idea_id))
    if isinstance(idea, RootIdea):
        raise HTTPBadRequest("Cannot edit root idea.")
    discussion = Discussion.get(int(discussion_id))
    if not discussion:
        raise HTTPNotFound(
            "Discussion with id '%s' not found." % discussion_id)
    if idea.discussion_id != discussion.id:
        raise HTTPBadRequest(
            "Idea from discussion %s cannot be saved from a different "
            "discussion (%s)." % (idea.discussion_id, discussion.id))
    if 'shortTitle' in idea_data:
        idea.short_title = idea_data['shortTitle']
    if 'longTitle' in idea_data:
        idea.long_title = idea_data['longTitle']
    if 'definition' in idea_data:
        idea.definition = idea_data['definition']
    if 'parentId' in idea_data and idea_data['parentId'] is not None:
        # TODO: Make sure this is sent as a list!
        parent = Idea.get_instance(idea_data['parentId'])
        if not parent:
            raise HTTPNotFound(
                "Missing parentId %s" % (idea_data['parentId']))
        # calculate it early to maximize contention.
        prev_ancestors = parent.get_all_ancestors()
        new_ancestors = set()
        order = idea_data.get('order', 0.0)
        current_parent = None
        for parent_link in idea.source_links:
            pl_ancestors = parent_link.source.get_all_ancestors()
            new_ancestors.update(pl_ancestors)
            if parent_link.source != parent:
                parent_link.is_tombstone = True
                Idea.db.expire(idea, ['source_links'])
                for ancestor in pl_ancestors:
                    if ancestor in prev_ancestors:
                        break
                    ancestor.send_to_changes()
            else:
                parent_link.order = order
                current_parent = parent_link
                Idea.db.expire(parent_link.source, ['target_links'])
                parent_link.source.send_to_changes()
        if current_parent is None:
            link = IdeaLink(source=parent, target=idea, order=order)
            Idea.db.add(link)
            # None of these 3 calls should be necessary, but they do help with
            # the parents being available (the "empty parent" bug).
            # The root cause is somewhere in IdeaLink, or in sqlalchemy proper
            # but I can't seem to find it - benoitg - 2014-05-27
            Idea.db.flush()
            Idea.db.expire(parent, ['target_links'])
            Idea.db.expire(idea, ['source_links'])
            parent.send_to_changes()
        for ancestor in prev_ancestors:
            if ancestor in new_ancestors:
                break
            ancestor.send_to_changes()
    idea.is_in_next_synthesis = idea_data.get('inNextSynthesis', False)
    idea.send_to_changes()
    return {'ok': True, 'id': idea.uri()}
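A hedged sketch of the JSON body this view consumes, reconstructed from the keys it reads (shortTitle, longTitle, definition, parentId, order, inNextSynthesis); all values are invented placeholders.

# Hypothetical PUT body; values are illustrative only.
import json

payload = json.dumps({
    "shortTitle": "Renewable energy",
    "longTitle": "How should we invest in renewable energy?",
    "definition": "<p>Scope: wind, solar, storage</p>",
    "parentId": "local:Idea/12",  # triggers the IdeaLink re-parenting above
    "order": 2.0,                 # position among the new parent's children
    "inNextSynthesis": True,
})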
def get_posts(request):
    """
    Query interface on posts
    Filters have two forms:
    only_*, is for filters that cannot be reversed (ex: only_synthesis)
    is_*, is for filters that can be reversed (ex: is_unread=true returns
    only unread messages, is_unread=false returns only read messages)
    order can be chronological, reverse_chronological
    """
    localizer = request.localizer
    discussion_id = int(request.matchdict['discussion_id'])
    discussion = Discussion.get(int(discussion_id))
    if not discussion:
        raise HTTPNotFound(localizer.translate(
            _("No discussion found with id=%s")) % discussion_id)
    discussion.import_from_sources()
    user_id = authenticated_userid(request) or Everyone
    permissions = get_permissions(user_id, discussion_id)
    DEFAULT_PAGE_SIZE = 25
    page_size = DEFAULT_PAGE_SIZE
    filter_names = [
        filter_name for filter_name
        in request.GET.getone('filters').split(',')
        if filter_name
    ] if request.GET.get('filters') else []
    try:
        page = int(request.GET.getone('page'))
    except (ValueError, KeyError):
        page = 1
    text_search = request.GET.get('text_search', None)
    order = request.GET.get('order')
    if order is None:
        order = 'chronological'
    assert order in ('chronological', 'reverse_chronological', 'score')
    if order == 'score':
        assert text_search is not None
    if page < 1:
        page = 1
    root_post_id = request.GET.getall('root_post_id')
    if root_post_id:
        root_post_id = get_database_id("Post", root_post_id[0])
    family_post_id = request.GET.getall('family_post_id')
    if family_post_id:
        family_post_id = get_database_id("Post", family_post_id[0])
    root_idea_id = request.GET.getall('root_idea_id')
    if root_idea_id:
        root_idea_id = get_database_id("Idea", root_idea_id[0])
    ids = request.GET.getall('ids[]')
    if ids:
        ids = [get_database_id("Post", id) for id in ids]
    view_def = request.GET.get('view') or 'default'
    only_synthesis = request.GET.get('only_synthesis')
    post_author_id = request.GET.get('post_author')
    if post_author_id:
        post_author_id = get_database_id("AgentProfile", post_author_id)
        assert AgentProfile.get(post_author_id), \
            "Unable to find agent profile with id " + post_author_id
    post_replies_to = request.GET.get('post_replies_to')
    if post_replies_to:
        post_replies_to = get_database_id("AgentProfile", post_replies_to)
        assert AgentProfile.get(post_replies_to), \
            "Unable to find agent profile with id " + post_replies_to
    posted_after_date = request.GET.get('posted_after_date')
    posted_before_date = request.GET.get('posted_before_date')
    PostClass = SynthesisPost if only_synthesis == "true" else Post
    if order == 'score':
        posts = discussion.db.query(
            PostClass, Content.body_text_index.score_name)
    else:
        posts = discussion.db.query(PostClass)
    posts = posts.filter(PostClass.discussion_id == discussion_id)
    ##no_of_posts_to_discussion = posts.count()
    post_data = []
    only_orphan = request.GET.get('only_orphan')
    if only_orphan == "true":
        if root_idea_id:
            raise HTTPBadRequest(localizer.translate(
                _("Getting orphan posts of a specific idea isn't supported.")))
        orphans = text(Idea._get_orphan_posts_statement(),
                       bindparams=[bindparam('discussion_id', discussion_id)]
                       ).columns(column('post_id')).alias('orphans')
        posts = posts.join(orphans, PostClass.id == orphans.c.post_id)
    elif only_orphan == "false":
        raise HTTPBadRequest(localizer.translate(
            _("Getting non-orphan posts isn't supported.")))
    # "true" means hidden only, "false" (default) means visible only.
    # "any" means both.
    hidden = request.GET.get('hidden_messages', "false")
    if hidden != 'any':
        posts = posts.filter(PostClass.hidden == asbool(hidden))
    if root_idea_id:
        related = text(Idea._get_related_posts_statement(),
                       bindparams=[bindparam('root_idea_id', root_idea_id),
                                   bindparam('discussion_id', discussion_id)]
                       ).columns(column('post_id')).alias('related')
        # Virtuoso bug: This should work...
        # posts = posts.join(related, PostClass.id==related.c.post_id)
        posts = posts.join(related, PostClass.id == related.c.post_id)
    if root_post_id:
        root_post = Post.get(root_post_id)
        posts = posts.filter(
            (Post.ancestry.like(
                root_post.ancestry + cast(root_post.id, String) + ',%'))
            | (PostClass.id == root_post.id))
    elif family_post_id:
        root_post = Post.get(family_post_id)
        ancestor_ids = root_post.ancestor_ids()
        posts = posts.filter(
            (Post.ancestry.like(
                root_post.ancestry + cast(root_post.id, String) + ',%'))
            | (PostClass.id == root_post.id)
            | (PostClass.id.in_(ancestor_ids)))
    else:
        root_post = None
    if ids:
        posts = posts.filter(Post.id.in_(ids))
    if posted_after_date:
        posted_after_date = parse_datetime(posted_after_date)
        if posted_after_date:
            posts = posts.filter(PostClass.creation_date >= posted_after_date)
        # Maybe we should do something if the date is invalid. benoitg
    if posted_before_date:
        posted_before_date = parse_datetime(posted_before_date)
        if posted_before_date:
            posts = posts.filter(PostClass.creation_date <= posted_before_date)
        # Maybe we should do something if the date is invalid. benoitg
    if post_author_id:
        posts = posts.filter(PostClass.creator_id == post_author_id)
    if post_replies_to:
        parent_alias = aliased(PostClass)
        posts = posts.join(parent_alias, PostClass.parent)
        posts = posts.filter(parent_alias.creator_id == post_replies_to)
    # Post read/unread management
    is_unread = request.GET.get('is_unread')
    if user_id != Everyone:
        # This is horrible, but the join creates complex subqueries that
        # virtuoso cannot decode properly.
        read_posts = {
            v.post_id for v in discussion.db.query(ViewPost).filter(
                ViewPost.tombstone_condition(),
                ViewPost.actor_id == user_id,
                *ViewPost.get_discussion_conditions(discussion_id))
        }
        liked_posts = {
            l.post_id: l.id
            for l in discussion.db.query(LikedPost).filter(
                LikedPost.tombstone_condition(),
                LikedPost.actor_id == user_id,
                *LikedPost.get_discussion_conditions(discussion_id))
        }
        if is_unread is not None:
            posts = posts.outerjoin(
                ViewPost, and_(
                    ViewPost.actor_id == user_id,
                    ViewPost.post_id == PostClass.id,
                    ViewPost.tombstone_date == None))
            if is_unread == "true":
                posts = posts.filter(ViewPost.id == None)
            elif is_unread == "false":
                posts = posts.filter(ViewPost.id != None)
    else:
        # If there is no user_id, all posts are always unread
        if is_unread == "false":
            raise HTTPBadRequest(localizer.translate(
                _("You must be logged in to view which posts are read")))
    if text_search is not None:
        # another Virtuoso bug: offband kills score. but it helps speed.
        offband = () if (order == 'score') else None
        posts = posts.filter(Post.body_text_index.contains(
            text_search.encode('utf-8'), offband=offband))
    # posts = posts.options(contains_eager(Post.source))
    # Horrible hack... But useful for structure load
    if view_def == 'id_only':
        pass  # posts = posts.options(defer(Post.body))
    else:
        posts = posts.options(joinedload_all(Post.creator))
        posts = posts.options(joinedload_all(Post.extracts))
        posts = posts.options(joinedload_all(Post.widget_idea_links))
        posts = posts.options(joinedload_all(
            SynthesisPost.publishes_synthesis))
    if order == 'chronological':
        posts = posts.order_by(Content.creation_date)
    elif order == 'reverse_chronological':
        posts = posts.order_by(Content.creation_date.desc())
    elif order == 'score':
        posts = posts.order_by(Content.body_text_index.score_name.desc())
    print str(posts)
    no_of_posts = 0
    no_of_posts_viewed_by_user = 0
    for query_result in posts:
        score, viewpost, likedpost = None, None, None
        if not isinstance(query_result, (list, tuple)):
            query_result = [query_result]
        post = query_result[0]
        if user_id != Everyone:
            viewpost = post.id in read_posts
            likedpost = liked_posts.get(post.id, None)
        no_of_posts += 1
        serializable_post = post.generic_json(
            view_def, user_id, permissions) or {}
        if order == 'score':
            score = query_result[1]
            serializable_post['score'] = score
        if viewpost:
            serializable_post['read'] = True
            no_of_posts_viewed_by_user += 1
        elif (user_id != Everyone and root_post is not None
                and root_post.id == post.id):
            # Mark post read, we requested it explicitly
            viewed_post = ViewPost(actor_id=user_id, post=root_post)
            discussion.db.add(viewed_post)
            serializable_post['read'] = True
        else:
            serializable_post['read'] = False
        # serializable_post['liked'] = likedpost.uri() if likedpost else False
        serializable_post['liked'] = LikedPost.uri_generic(
            likedpost) if likedpost else False
        post_data.append(serializable_post)
    # Benoitg: For now, this completely garbles threading without intelligent
    # handling of pagination. Disabling
    # posts = posts.limit(page_size).offset(data['startIndex']-1)
    # This code isn't up to date. If limiting the query by page, we need to
    # calculate the counts with a separate query to have the right number of
    # results
    # no_of_messages_viewed_by_user = discussion.db.query(ViewPost).join(
    #     Post
    # ).filter(
    #     Post.discussion_id == discussion_id,
    #     ViewPost.actor_id == user_id,
    # ).count() if user_id else 0
    data = {}
    data["page"] = page
    data["unread"] = no_of_posts - no_of_posts_viewed_by_user
    data["total"] = no_of_posts
    data["maxPage"] = max(1, ceil(float(data["total"]) / page_size))
    # TODO: Check if we want 1 based index in the api
    data["startIndex"] = (page_size * page) - (page_size - 1)
    if data["page"] == data["maxPage"]:
        data["endIndex"] = data["total"]
    else:
        data["endIndex"] = data["startIndex"] + (page_size - 1)
    data["posts"] = post_data
    return data
def save_idea(request):
    discussion_id = int(request.matchdict['discussion_id'])
    idea_id = request.matchdict['id']
    idea_data = json.loads(request.body)
    # Idea.default_db.execute('set transaction isolation level read committed')
    # Special items in TOC, like unsorted posts.
    if idea_id in ['orphan_posts']:
        return {'ok': False, 'id': Idea.uri_generic(idea_id)}
    idea = Idea.get_instance(idea_id)
    if not idea:
        raise HTTPNotFound("No such idea: %s" % (idea_id))
    if isinstance(idea, RootIdea):
        raise HTTPBadRequest("Cannot edit root idea.")
    discussion = Discussion.get(int(discussion_id))
    if not discussion:
        raise HTTPNotFound(
            "Discussion with id '%s' not found." % discussion_id)
    if idea.discussion_id != discussion.id:
        raise HTTPBadRequest(
            "Idea from discussion %s cannot be saved from a different "
            "discussion (%s)." % (idea.discussion_id, discussion.id))
    if 'shortTitle' in idea_data:
        idea.short_title = idea_data['shortTitle']
    if 'longTitle' in idea_data:
        idea.long_title = idea_data['longTitle']
    if 'definition' in idea_data:
        idea.definition = idea_data['definition']
    if 'parentId' in idea_data and idea_data['parentId'] is not None:
        # TODO: Make sure this is sent as a list!
        parent = Idea.get_instance(idea_data['parentId'])
        if not parent:
            raise HTTPNotFound(
                "Missing parentId %s" % (idea_data['parentId']))
        # calculate it early to maximize contention.
        prev_ancestors = parent.get_all_ancestors()
        new_ancestors = set()
        order = idea_data.get('order', 0.0)
        for parent_link in idea.source_links:
            # still assuming there's only one.
            pl_parent = parent_link.source
            pl_ancestors = pl_parent.get_all_ancestors()
            new_ancestors.update(pl_ancestors)
            if parent_link.source != parent:
                parent_link.copy(True)
                parent_link.source = parent
                parent.db.expire(parent, ['target_links'])
                parent.db.expire(pl_parent, ['target_links'])
                for ancestor in pl_ancestors:
                    if ancestor in prev_ancestors:
                        break
                    ancestor.send_to_changes()
                for ancestor in prev_ancestors:
                    if ancestor in new_ancestors:
                        break
                    ancestor.send_to_changes()
            parent_link.order = order
            parent_link.db.expire(parent_link.source, ['target_links'])
            parent_link.source.send_to_changes()
            parent_link.db.flush()
    idea.is_in_next_synthesis = idea_data.get('inNextSynthesis', False)
    idea.send_to_changes()
    return {'ok': True, 'id': idea.uri()}
def save_idea(request):
    """Update this idea.

    In case the ``parentId`` is changed, handle all ``IdeaLink`` changes and
    send relevant ideas on the socket."""
    discussion_id = int(request.matchdict['discussion_id'])
    user_id = request.authenticated_userid
    permissions = get_permissions(user_id, discussion_id)
    idea_id = request.matchdict['id']
    idea_data = json.loads(request.body)
    # Idea.default_db.execute('set transaction isolation level read committed')
    # Special items in TOC, like unsorted posts.
    if idea_id in ['orphan_posts']:
        return {'ok': False, 'id': Idea.uri_generic(idea_id)}
    idea = Idea.get_instance(idea_id)
    if not idea:
        raise HTTPNotFound("No such idea: %s" % (idea_id))
    if isinstance(idea, RootIdea):
        raise HTTPBadRequest("Cannot edit root idea.")
    discussion = Discussion.get(int(discussion_id))
    if not discussion:
        raise HTTPNotFound(
            "Discussion with id '%s' not found." % discussion_id)
    if idea.discussion_id != discussion.id:
        raise HTTPBadRequest(
            "Idea from discussion %s cannot be saved from a different "
            "discussion (%s)." % (idea.discussion_id, discussion.id))
    simple_fields = {
        'message_view_override': 'message_view_override',
        'messages_in_parent': 'messages_in_parent',
    }
    for key, attr_name in simple_fields.iteritems():
        if key in idea_data:
            setattr(idea, attr_name, idea_data[key])
    for key, attr_name in langstring_fields.iteritems():
        if key in idea_data:
            current = getattr(idea, attr_name)
            ls_data = idea_data[key]
            # TODO: handle legacy string instance?
            assert isinstance(ls_data, (dict, NoneType))
            if current:
                if ls_data:
                    current.update_from_json(
                        ls_data, user_id, permissions=permissions)
                else:
                    current.delete()
            elif ls_data:
                current = LangString.create_from_json(
                    ls_data, user_id, permissions=permissions)
                setattr(idea, attr_name, current)
    if 'parentId' in idea_data and idea_data['parentId'] is not None:
        # TODO: Make sure this is sent as a list!
        parent = Idea.get_instance(idea_data['parentId'])
        if not parent:
            raise HTTPNotFound(
                "Missing parentId %s" % (idea_data['parentId']))
        # calculate it early to maximize contention.
        prev_ancestors = parent.get_all_ancestors()
        new_ancestors = set()
        order = idea_data.get('order', 0.0)
        for parent_link in idea.source_links:
            # still assuming there's only one.
            pl_parent = parent_link.source
            pl_ancestors = pl_parent.get_all_ancestors()
            new_ancestors.update(pl_ancestors)
            if parent_link.source != parent:
                parent_link.copy(True)
                parent_link.source = parent
                parent.db.expire(parent, ['target_links'])
                parent.db.expire(pl_parent, ['target_links'])
                for ancestor in pl_ancestors:
                    if ancestor in prev_ancestors:
                        break
                    ancestor.send_to_changes()
                for ancestor in prev_ancestors:
                    if ancestor in new_ancestors:
                        break
                    ancestor.send_to_changes()
            parent_link.order = order
            parent_link.db.expire(parent_link.source, ['target_links'])
            parent_link.source.send_to_changes()
            parent_link.db.flush()
    idea.send_to_changes()
    return {'ok': True, 'id': idea.uri()}
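A hedged guess at the shape of a LangString field in the request body, consistent with the update_from_json/create_from_json calls above; the exact wire format (key names, locale codes) is an assumption, not taken from a spec.

# Hypothetical multilingual field; key names and locales are assumptions.
idea_data = {
    "shortTitle": {
        "entries": [
            {"@language": "en", "value": "Renewable energy"},
            {"@language": "fr", "value": "Energies renouvelables"},
        ]
    }
}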
def get_cluster_info(
        discussion_id, idea_id=None, num_topics=200, passes=5,
        silhouette_cutoff=0.05, algorithm="DBSCAN", **algo_kwargs):
    metric = algo_kwargs.get('metric', 'cosine')
    if idea_id:
        idea = Idea.get(idea_id)
        discussion = idea.discussion
    else:
        idea = None
        discussion = Discussion.get(discussion_id)
    _, tfidf_model, gensim_model = get_discussion_semantic_analysis(
        discussion_id, num_topics=num_topics,
        # passes=passes)
        model_cls=gmodels.lsimodel.LsiModel)
    if not tfidf_model or not gensim_model:
        return
    lang = discussion.discussion_locales[0].split('_')[0]
    dirname = join(nlp_data, lang)
    stemmer = get_stemmer(lang)
    trans = identity
    if not isinstance(stemmer, DummyStemmer):
        stemmer = ReversibleStemmer(stemmer, join(dirname, STEMS_FNAME))

        def trans(x):
            return stemmer.reverse.get(x, x)
    corpus = IdMmCorpus(join(dirname, CORPUS_FNAME))
    # TODO: Orphans
    if idea:
        post_ids = post_ids_of(idea)
    else:
        post_ids = [x for (x,) in discussion.db.query(Content.id).filter_by(
            discussion_id=discussion_id).all()]
    if len(post_ids) < 10:
        return
    post_id_by_index = {n: post_id for (n, post_id) in enumerate(post_ids)}
    index_by_post_id = {post_id: n for (n, post_id) in enumerate(post_ids)}
    subcorpus = corpus[post_ids]
    tfidf_corpus = tfidf_model[subcorpus]
    if isinstance(gensim_model, gmodels.lsimodel.LsiModel):
        topic_intensities = (gensim_model.projection.s
                             / gensim_model.projection.s[0])
    else:
        topic_intensities = numpy.ones((num_topics,))
    model_matrix = gensimvecs_to_csr(
        gensim_model[tfidf_corpus], num_topics, topic_intensities)
    if 'eps' not in algo_kwargs:
        # This is silly, but approximate eps with optics
        o = Optics(algo_kwargs.get('min_samples', 4), metric)
        o.calculate_distances(model_matrix.todense())
        RD = o.RD
        print "optics result:", RD
        a, b = min(RD[1:]), max(RD)
        eps = a + (b - a) * 0.5
        print "epsilon", eps
        algo_kwargs['eps'] = eps
    algorithm = getattr(sklearn.cluster, algorithm)
    algorithm = algorithm(
        metric=metric,
        algorithm=('brute' if metric == 'cosine' else 'auto'),
        **algo_kwargs)
    r = algorithm.fit(model_matrix)
    labels = r.labels_
    n_clusters_raw = len(set(labels))
    # n_clusters_ = n_clusters_raw - (1 if -1 in labels else 0)
    silhouette_score = None
    if n_clusters_raw > 1:
        silhouette_score = metrics.silhouette_score(
            model_matrix, labels, metric=metric)
        if silhouette_score < silhouette_cutoff:
            return None
    post_clusters = []
    remainder = set(post_ids)
    for label in set(labels):
        if label == -1:
            continue
        subset = [n for (n, l) in enumerate(labels) if label == l]
        cluster = [post_id_by_index[n] for n in subset]
        remainder -= set(cluster)
        post_clusters.append(cluster)
    remainder = list(remainder)
    all_cluster_features = calc_features(
        post_ids, post_clusters, corpus, tfidf_model, gensim_model,
        num_topics, topic_intensities, trans)
    if idea:
        # Compare to children classification
        (compare_with_ideas, all_idea_scores, ideas_of_post,
         children_remainder) = compare_with_children(
            idea, post_ids, post_clusters, remainder, labels)
    else:
        compare_with_ideas = ()
        ideas_of_post = defaultdict(tuple)
        all_idea_scores = defaultdict(dict)
    post_text = dict(Content.default_db.query(
        Content.id, Content.body).all())
    post_info = {
        post_id: dict(
            ideas=ideas_of_post[post_id],
            cluster_id=labels[index_by_post_id[post_id]],
            text=post_text[post_id])
        for post_id in post_ids
    }
    clusters = [
        dict(cluster=cluster,
             features=all_cluster_features[n],
             idea_scores=all_idea_scores[n])
        for (n, cluster) in enumerate(post_clusters)
    ]
    clusters.append(dict(cluster=remainder, idea_scores=all_idea_scores[-1]))
    return (silhouette_score, compare_with_ideas, clusters, post_info)
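A short, hedged driver for get_cluster_info; argument values are illustrative, and the unpacking follows the return tuple built above.

# Hypothetical call: cluster the posts under one idea.
result = get_cluster_info(
    discussion_id=3, idea_id=42,
    algorithm="DBSCAN", min_samples=4)  # min_samples flows into **algo_kwargs
if result is not None:
    silhouette, compare_with_ideas, clusters, post_info = result
    print "silhouette:", silhouette
    for c in clusters:
        print len(c['cluster']), c.get('features')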
def get_posts(request):
    """
    Query interface on posts
    Filters have two forms:
    only_*, is for filters that cannot be reversed
        (ex: only_synthesis, only_orphan)
    is_*, is for filters that can be reversed (ex: is_unread=true returns
        only unread messages, is_unread=false returns only read messages)
    order: can be chronological, reverse_chronological, popularity
    root_post_id: all posts below the one specified.
    family_post_id: all posts below the one specified, and all its ancestors.
    post_replies_to: replies to a given post
    root_idea_id: all posts associated with the given idea
    ids: explicit message ids.
    posted_after_date, posted_before_date: date selection (ISO format)
    post_author: filter by author
    classifier: filter on message_classifier, or absence thereof
        (classifier=null). Can be negated with "!"
    """
    localizer = request.localizer
    discussion_id = int(request.matchdict['discussion_id'])
    discussion = Discussion.get(int(discussion_id))
    if not discussion:
        raise HTTPNotFound(localizer.translate(
            _("No discussion found with id=%s")) % discussion_id)
    discussion.import_from_sources()
    user_id = request.authenticated_userid or Everyone
    permissions = get_permissions(user_id, discussion_id)
    DEFAULT_PAGE_SIZE = 25
    page_size = DEFAULT_PAGE_SIZE
    filter_names = [
        filter_name for filter_name
        in request.GET.getone('filters').split(',')
        if filter_name
    ] if request.GET.get('filters') else []
    try:
        page = int(request.GET.getone('page'))
    except (ValueError, KeyError):
        page = 1
    text_search = request.GET.get('text_search', None)
    order = request.GET.get('order')
    if order is None:
        order = 'chronological'
    assert order in ('chronological', 'reverse_chronological', 'score',
                     'popularity')
    if order == 'score':
        assert text_search is not None
    if page < 1:
        page = 1
    root_post_id = request.GET.getall('root_post_id')
    if root_post_id:
        root_post_id = get_database_id("Post", root_post_id[0])
    family_post_id = request.GET.getall('family_post_id')
    if family_post_id:
        family_post_id = get_database_id("Post", family_post_id[0])
    root_idea_id = request.GET.getall('root_idea_id')
    if root_idea_id:
        root_idea_id = get_database_id("Idea", root_idea_id[0])
    ids = request.GET.getall('ids[]')
    if ids:
        ids = [get_database_id("Post", id) for id in ids]
    view_def = request.GET.get('view') or 'default'
    only_synthesis = request.GET.get('only_synthesis')
    post_author_id = request.GET.get('post_author')
    if post_author_id:
        post_author_id = get_database_id("AgentProfile", post_author_id)
        assert AgentProfile.get(post_author_id), \
            "Unable to find agent profile with id " + post_author_id
    post_replies_to = request.GET.get('post_replies_to')
    if post_replies_to:
        post_replies_to = get_database_id("AgentProfile", post_replies_to)
        assert AgentProfile.get(post_replies_to), \
            "Unable to find agent profile with id " + post_replies_to
    posted_after_date = request.GET.get('posted_after_date')
    posted_before_date = request.GET.get('posted_before_date')
    message_classifiers = request.GET.getall('classifier')
    PostClass = SynthesisPost if only_synthesis == "true" else Post
    if order == 'score':
        posts = discussion.db.query(
            PostClass, Content.body_text_index.score_name)
    else:
        posts = discussion.db.query(PostClass)
    posts = posts.filter(
        PostClass.discussion_id == discussion_id,
    ).filter(PostClass.type != 'proposition_post')
    ##no_of_posts_to_discussion = posts.count()
    post_data = []
    # True means deleted only, False (default) means non-deleted only.
    # None means both.
    # v0
    # deleted = request.GET.get('deleted', None)
    # end v0
    # v1: we would like something like that
    # deleted = request.GET.get('deleted', None)
    # if deleted is None:
    #     if view_def == 'id_only':
    #         deleted = None
    #     else:
    #         deleted = False
    # end v1
    # v2
    # deleted = request.GET.get('deleted', None)
    # if deleted is None:
    #     if not ids:
    #         deleted = False
    #     else:
    #         deleted = None
    #
    # if deleted == 'false':
    #     deleted = False
    #     posts = posts.filter(PostClass.tombstone_condition())
    # elif deleted == 'true':
    #     deleted = True
    #     posts = posts.filter(PostClass.not_tombstone_condition())
    # elif deleted == 'any':
    #     deleted = None
    #     # result will contain deleted and non-deleted posts
    #     pass
    # end v2
    # v3
    # deleted = request.GET.get('deleted', None)
    # if deleted is None:
    #     if not ids:
    #         deleted = False
    #     else:
    #         deleted = None
    # if deleted == 'true':
    #     deleted = True
    #     posts = posts.filter(PostClass.not_tombstone_condition())
    # end v3
    # v4
    deleted = request.GET.get('deleted', None)
    if deleted is None:
        if not ids:
            deleted = False
        else:
            deleted = None
    elif deleted.lower() == "any":
        deleted = None
    else:
        deleted = asbool(deleted)
    # if deleted is not in (False, True, None):
    #     deleted = False
    # end v4
    only_orphan = asbool(request.GET.get('only_orphan', False))
    if only_orphan:
        if root_idea_id:
            raise HTTPBadRequest(localizer.translate(
                _("Getting orphan posts of a specific idea isn't supported.")))
        orphans = Idea._get_orphan_posts_statement(
            discussion_id, True, include_deleted=deleted).subquery("orphans")
        posts = posts.join(orphans, PostClass.id == orphans.c.post_id)
    if root_idea_id:
        related = Idea.get_related_posts_query_c(
            discussion_id, root_idea_id, True, include_deleted=deleted)
        posts = posts.join(related, PostClass.id == related.c.post_id)
    elif not only_orphan:
        if deleted is not None:
            if deleted:
                posts = posts.filter(
                    PostClass.publication_state.in_(
                        deleted_publication_states))
            else:
                posts = posts.filter(PostClass.tombstone_date == None)
    if root_post_id:
        root_post = Post.get(root_post_id)
        posts = posts.filter(
            (Post.ancestry.like(
                root_post.ancestry + cast(root_post.id, String) + ',%'))
            | (PostClass.id == root_post.id))
    elif family_post_id:
        root_post = Post.get(family_post_id)
        ancestor_ids = root_post.ancestor_ids()
        posts = posts.filter(
            (Post.ancestry.like(
                root_post.ancestry + cast(root_post.id, String) + ',%'))
            | (PostClass.id == root_post.id)
            | (PostClass.id.in_(ancestor_ids)))
    else:
        root_post = None
    if ids:
        posts = posts.filter(Post.id.in_(ids))
    if posted_after_date:
        posted_after_date = parse_datetime(posted_after_date)
        if posted_after_date:
            posts = posts.filter(
                PostClass.creation_date >= posted_after_date)
        # Maybe we should do something if the date is invalid. benoitg
    if posted_before_date:
        posted_before_date = parse_datetime(posted_before_date)
        if posted_before_date:
            posts = posts.filter(
                PostClass.creation_date <= posted_before_date)
        # Maybe we should do something if the date is invalid. benoitg
    if post_author_id:
        posts = posts.filter(PostClass.creator_id == post_author_id)
    if message_classifiers:
        if any([len(classifier) == 0 for classifier in message_classifiers]):
            return {'total': 0, 'posts': []}
        polarities = [classifier[0] != "!"
                      for classifier in message_classifiers]
        polarity = all(polarities)
        if not polarity:
            message_classifiers = [c.strip("!") for c in message_classifiers]
        if polarity != any(polarities):
            raise HTTPBadRequest(
                _("Do not combine negative and positive classifiers"))
        # Treat null as no classifier
        includes_null = 'null' in message_classifiers
        message_classifiers_nonull = message_classifiers
        if includes_null:
            message_classifiers_nonull = filter(
                lambda c: c != "null", message_classifiers)
        if polarity:
            if len(message_classifiers) == 1:
                term = PostClass.message_classifier == (
                    None if includes_null else message_classifiers[0])
            else:
                term = PostClass.message_classifier.in_(
                    message_classifiers_nonull)
                if includes_null:
                    term = term | (PostClass.message_classifier == None)
        else:
            if len(message_classifiers) == 1:
                term = PostClass.message_classifier != (
                    None if includes_null else message_classifiers[0])
            else:
                term = PostClass.message_classifier.notin_(
                    message_classifiers_nonull)
            if not includes_null:
                term = term | (PostClass.message_classifier == None)
        posts = posts.filter(term)
    if post_replies_to:
        parent_alias = aliased(PostClass)
        posts = posts.join(parent_alias, PostClass.parent)
        posts = posts.filter(parent_alias.creator_id == post_replies_to)
    # Post read/unread management
    is_unread = request.GET.get('is_unread')
    translations = None
    service = discussion.translation_service()
    if user_id != Everyone:
        # This is horrible, but the join creates complex subqueries that
        # virtuoso cannot decode properly.
        read_posts = {
            v.post_id for v in discussion.db.query(ViewPost).filter(
                ViewPost.tombstone_condition(),
                ViewPost.actor_id == user_id,
                *ViewPost.get_discussion_conditions(discussion_id))
        }
        my_sentiments = {
            l.post_id: l
            for l in discussion.db.query(SentimentOfPost).filter(
                SentimentOfPost.tombstone_condition(),
                SentimentOfPost.actor_id == user_id,
                *SentimentOfPost.get_discussion_conditions(discussion_id))
        }
        if is_unread is not None:
            posts = posts.outerjoin(
                ViewPost, and_(
                    ViewPost.actor_id == user_id,
                    ViewPost.post_id == PostClass.id,
                    ViewPost.tombstone_date == None))
            if is_unread == "true":
                posts = posts.filter(ViewPost.id == None)
            elif is_unread == "false":
                posts = posts.filter(ViewPost.id != None)
        user = AgentProfile.get(user_id)
        if service.canTranslate is not None:
            translations = PrefCollectionTranslationTable(
                service, LanguagePreferenceCollection.getCurrent(request))
    else:
        # If there is no user_id, all posts are always unread
        my_sentiments = {}
        if is_unread == "false":
            raise HTTPBadRequest(localizer.translate(
                _("You must be logged in to view which posts are read")))
    if text_search is not None:
        # another Virtuoso bug: offband kills score. but it helps speed.
        offband = () if (order == 'score') else None
        posts = posts.filter(Post.body_text_index.contains(
            text_search.encode('utf-8'), offband=offband))
    # posts = posts.options(contains_eager(Post.source))
    # Horrible hack... But useful for structure load
    if view_def == 'id_only':
        pass  # posts = posts.options(defer(Post.body))
    else:
        ideaContentLinkQuery = posts.with_entities(
            PostClass.id, PostClass.idea_content_links_above_post)
        ideaContentLinkCache = dict(ideaContentLinkQuery.all())
        # Note: we could count the likes the same way and kill the subquery.
        # But it interferes with the popularity order,
        # and the benefit is not that high.
        sentiment_counts = discussion.db.query(
            PostClass.id, SentimentOfPost.type, count(SentimentOfPost.id)
        ).join(SentimentOfPost
        ).filter(
            PostClass.id.in_(posts.with_entities(PostClass.id).subquery()),
            SentimentOfPost.tombstone_condition()
        ).group_by(PostClass.id, SentimentOfPost.type)
        sentiment_counts_by_post_id = defaultdict(dict)
        for (post_id, sentiment_type, sentiment_count) in sentiment_counts:
            sentiment_counts_by_post_id[post_id][
                sentiment_type[SentimentOfPost.TYPE_PREFIX_LEN:]
            ] = sentiment_count
        posts = posts.options(
            # undefer(Post.idea_content_links_above_post),
            joinedload_all(Post.creator),
            joinedload_all(Post.extracts),
            joinedload_all(Post.widget_idea_links),
            joinedload_all(SynthesisPost.publishes_synthesis),
            subqueryload_all(Post.attachments))
        if len(discussion.discussion_locales) > 1:
            posts = posts.options(*Content.subqueryload_options())
        else:
            posts = posts.options(*Content.joinedload_options())
    if order == 'chronological':
        posts = posts.order_by(Content.creation_date)
    elif order == 'reverse_chronological':
        posts = posts.order_by(Content.creation_date.desc())
    elif order == 'score':
        posts = posts.order_by(Content.body_text_index.score_name.desc())
    elif order == 'popularity':
        # assume reverse chronological otherwise
        posts = posts.order_by(Content.disagree_count - Content.like_count,
                               Content.creation_date.desc())
    else:
        posts = posts.order_by(Content.id)
    # print str(posts)
    no_of_posts = 0
    no_of_posts_viewed_by_user = 0
    if deleted is True:
        # We just got deleted posts, now we want their ancestors for context
        post_ids = set()
        ancestor_ids = set()

        def add_ancestors(post):
            post_ids.add(post.id)
            ancestor_ids.update(
                [int(x) for x in post.ancestry.strip(",").split(",") if x])
        posts = list(posts)
        for post in posts:
            add_ancestors(post)
        ancestor_ids -= post_ids
        if ancestor_ids:
            ancestors = discussion.db.query(PostClass).filter(
                PostClass.id.in_(ancestor_ids))
            if view_def == 'id_only':
                pass  # ancestors = ancestors.options(defer(Post.body))
            else:
                ancestors = ancestors.options(
                    # undefer(Post.idea_content_links_above_post),
                    joinedload_all(Post.creator),
                    joinedload_all(Post.extracts),
                    joinedload_all(Post.widget_idea_links),
                    joinedload_all(SynthesisPost.publishes_synthesis),
                    subqueryload_all(Post.attachments))
                if len(discussion.discussion_locales) > 1:
                    ancestors = ancestors.options(
                        *Content.subqueryload_options())
                else:
                    ancestors = ancestors.options(
                        *Content.joinedload_options())
            posts.extend(ancestors.all())
    for query_result in posts:
        score, viewpost = None, None
        if not isinstance(query_result, (list, tuple)):
            query_result = [query_result]
        post = query_result[0]
        if deleted is True:
            add_ancestors(post)
        if user_id != Everyone:
            viewpost = post.id in read_posts
        if view_def != "id_only":
            translate_content(
                post, translation_table=translations, service=service)
        no_of_posts += 1
        serializable_post = post.generic_json(
            view_def, user_id, permissions) or {}
        if order == 'score':
            score = query_result[1]
            serializable_post['score'] = score
        if viewpost:
            serializable_post['read'] = True
            no_of_posts_viewed_by_user += 1
        elif (user_id != Everyone and root_post is not None
                and root_post.id == post.id):
            # Mark post read, we requested it explicitly
            viewed_post = ViewPost(
                actor_id=user_id,
                post=root_post
            )
            discussion.db.add(viewed_post)
            serializable_post['read'] = True
        else:
            serializable_post['read'] = False
        my_sentiment = my_sentiments.get(post.id, None)
        if my_sentiment is not None:
            my_sentiment = my_sentiment.generic_json(
                'default', user_id, permissions)
        serializable_post['my_sentiment'] = my_sentiment
        if view_def != "id_only":
            serializable_post['indirect_idea_content_links'] = (
                post.indirect_idea_content_links_with_cache(
                    ideaContentLinkCache.get(post.id, None)))
            serializable_post['sentiment_counts'] = \
                sentiment_counts_by_post_id[post.id]
        post_data.append(serializable_post)
    # Benoitg: For now, this completely garbles threading without intelligent
    # handling of pagination. Disabling
    # posts = posts.limit(page_size).offset(data['startIndex']-1)
    # This code isn't up to date. If limiting the query by page, we need to
    # calculate the counts with a separate query to have the right number of
    # results
    # no_of_messages_viewed_by_user = discussion.db.query(ViewPost).join(
    #     Post
    # ).filter(
    #     Post.discussion_id == discussion_id,
    #     ViewPost.actor_id == user_id,
    # ).count() if user_id else 0
    data = {}
    data["page"] = page
    data["unread"] = no_of_posts - no_of_posts_viewed_by_user
    data["total"] = no_of_posts
    data["maxPage"] = max(1, ceil(float(data["total"]) / page_size))
    # TODO: Check if we want 1 based index in the api
    data["startIndex"] = (page_size * page) - (page_size - 1)
    if data["page"] == data["maxPage"]:
        data["endIndex"] = data["total"]
    else:
        data["endIndex"] = data["startIndex"] + (page_size - 1)
    data["posts"] = post_data
    return data