def _generate_document_for_poll(self, poll, existing):
    group = poll.container.ihb

    fields = []

    # fields are in the form of [name, value, isStored, isIndexed, isTokenized]
    fields.append(['title', poll.title, False, True, True])
    fields.append(['karma', "0".zfill(6), False, True, False])  # polls index a constant zero karma
    fields.append(['u_name', poll.creator.display_name(), False, True, True])
    fields.append(['date', str(get_unix_timestamp(poll.date)), False, True, False])          # index it so that we can sort by it
    fields.append(['end_date', str(get_unix_timestamp(poll.end_date)), False, True, False])  # index it so that we can sort by it
    fields.append(['oid', self._encode_oid(poll._p_oid), True, True, False])
    fields.append(['type', 'Poll', True, True, True])
    fields.append(['g_name', group.display_name(), False, True, True])

    # create the main text to index (title + description + creator + choices);
    # the leading "poll polls" literals make polls findable by those keywords
    choices = poll.get_data().choices
    text = "poll polls %s %s %s %s" % (poll.title,
                                       poll.get_description(),
                                       poll.creator.display_name(),
                                       ''.join(["%s " % x for x in choices]))
    fields.append(['text', text, False, True, True])

    # create the preview text
    preview = self._generate_preview_text(poll.get_description())
    fields.append(['preview', preview, True, False, False])

    # send the document for indexing
    self._queue_document(fields, existing)
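# Why zfill(6) on the karma fields: Lucene-style term sorting is
# lexicographic, so numeric values are zero-padded to a fixed width
# to make string order match numeric order. A minimal illustration
# (demo only, not part of the indexer; assumes non-negative scores):
def _demo_karma_padding():
    assert sorted(['9', '10']) == ['10', '9']  # plain strings sort "wrong"
    assert sorted(['9'.zfill(6), '10'.zfill(6)]) == ['000009', '000010']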
def _generate_document_for_wiki_page(self, wikipage, existing):
    latest_version = wikipage.versions[-1]
    group = wikipage.wiki.group

    fields = []

    # fields are in the form of [name, value, isStored, isIndexed, isTokenized]
    fields.append(['title', latest_version.title, False, True, True])
    fields.append(['karma', str(wikipage.get_karma_score()).zfill(6), False, True, False])
    fields.append(['u_name', latest_version.author.display_name(), False, True, True])
    fields.append(['date', str(get_unix_timestamp(latest_version.date)), False, True, False])  # index it so that we can sort by it
    fields.append(['oid', self._encode_oid(wikipage._p_oid), True, True, False])
    fields.append(['type', 'Wikipage', True, True, True])
    fields.append(['g_name', group.display_name(), False, True, True])

    # create the main text to index
    # (title + raw text + comment summaries + comment authors' names + page name + last editor name + tags)
    tidb = get_tagged_item_database()
    tags = " ".join(tidb.get_tags(wikipage._p_oid))
    comments = wikipage.get_comments()
    text = "%s %s %s %s %s %s" % (latest_version.title,
                                  latest_version.get_raw(),
                                  ''.join(["%s %s " % (x.get_summary(), x.author.display_name()) for x in comments]),
                                  wikipage.name,
                                  latest_version.author.display_name(),
                                  tags)
    fields.append(['text', text, False, True, True])

    # create the preview text
    preview = self._generate_preview_text(latest_version.get_raw())
    fields.append(['preview', preview, True, False, False])

    # send the document for indexing
    self._queue_document(fields, existing)
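# The 'date' fields above are stringified unix timestamps, which sort
# correctly as strings for any realistic date range (a fixed 10-digit
# width from 2001 through 2286). A minimal sketch of what
# get_unix_timestamp is assumed to do (the real helper may differ):
import calendar

def _get_unix_timestamp_sketch(dt):
    """Convert a datetime to integer seconds since the epoch (UTC)."""
    return calendar.timegm(dt.utctimetuple())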
def _generate_document_for_group(self, group, existing):
    fields = []

    # fields are in the form of [name, value, isStored, isIndexed, isTokenized]
    fields.append(['karma', str(group.get_karma_score()).zfill(6), False, True, False])
    fields.append(['g_name', group.display_name(), False, True, True])
    fields.append(['date', str(get_unix_timestamp(group.date)), False, True, False])  # index it so that we can sort by it
    fields.append(['oid', self._encode_oid(group._p_oid), True, True, False])
    fields.append(['type', 'Group', True, True, True])

    # create the main text to index (description + group userid + name)
    text = "%s %s %s" % (group.description, group.get_user_id(), group.display_name())
    fields.append(['text', text, False, True, True])

    # create the preview text
    preview = self._generate_preview_text(group.description)
    fields.append(['preview', preview, True, False, False])

    # send the document for indexing
    self._queue_document(fields, existing)
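# A plausible sketch of the _generate_preview_text helper used by the
# generators above (assumption -- the real implementation may differ):
# strip markup, collapse whitespace, and truncate to a short stored
# excerpt for display in search results.
import re

def _generate_preview_text_sketch(raw, limit=200):
    plain = re.sub(r'<[^>]+>', ' ', raw or '')  # drop any HTML tags
    plain = ' '.join(plain.split())             # collapse runs of whitespace
    return plain[:limit]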
def _generate_document_for_blog_item(self, blogitem, existing, t):
    fields = []

    # don't use watchable_modified_date() now that comments live separately from blog items
    last_edited_date = blogitem.modified
    if not last_edited_date:
        last_edited_date = blogitem.date

    # gather fields that are consistent across all types of blogs
    # fields are in the form of [name, value, isStored, isIndexed, isTokenized]
    fields.append(['title', blogitem.title, False, True, True])
    fields.append(['karma', str(blogitem.get_karma_score()).zfill(6), False, True, False])
    fields.append(['u_name', blogitem.author.display_name(), False, True, True])
    fields.append(['date', str(get_unix_timestamp(last_edited_date)), False, True, False])  # index it so that we can sort by it
    fields.append(['oid', self._encode_oid(blogitem._p_oid), True, True, False])

    # create the main text for indexing (title + summary + author name)
    text = "%s %s %s" % (blogitem.title, blogitem.get_summary(), blogitem.author.display_name())
    fields.append(['text', text, False, True, True])

    # create the preview text
    preview = self._generate_preview_text(blogitem.get_summary())
    fields.append(['preview', preview, True, False, False])

    # gather fields that differ depending on the type of blog
    if t is qon.group.Group:
        group = blogitem.blog.ihb
        fields.append(['type', 'Discussion', True, True, True])
        fields.append(['g_name', group.display_name(), False, True, True])
    if t is qon.user.User:
        fields.append(['type', 'Usernews', True, True, True])

    # index on tags
    tidb = get_tagged_item_database()
    tags = " ".join(tidb.get_tags(blogitem._p_oid))
    fields.append(['tags', tags, False, True, False])

    # send the document for indexing
    self._queue_document(fields, existing)
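# The ZODB _p_oid used by every generator above is a raw 8-byte
# string; _encode_oid presumably maps it to a printable token that is
# safe to store and query as a single untokenized term. A hex encoding
# is one common choice (assumption -- the real encoding may differ):
import binascii

def _encode_oid_sketch(oid):
    return binascii.hexlify(oid)  # 8 raw bytes -> 16 hex characters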
def group_purge(group):
    """Purge an unsponsored group."""
    if not group.is_accepted():
        # send a message to the group owner that the group is being purged
        email = group.owners[0].get_primary_email()
        d = dict(group_name=group.display_name(),
                 primary=email,
                 group_description=group.description,
                 )
        message = _notify_group_purged % d

        import socket
        try:
            sendmail("ned.com Group Not Sponsored", message, [email])
        except socket.error:
            # a mail delivery failure shouldn't block the purge
            pass

        # delete the group
        group_delete(group)
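# _notify_group_purged is assumed to be a module-level %-style template
# keyed by the dict built above. Hypothetical wording (the real text is
# defined elsewhere):
_notify_group_purged_sketch = """\
The group "%(group_name)s" was not sponsored and is being purged.

Description:
%(group_description)s

This notice was sent to %(primary)s.
"""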