def indexReader(self, indexWriter, reader, owner, attribute, version):
    doc = Document()
    doc.add(Field("owner", owner.str16(), True, False, False))
    doc.add(Field("attribute", attribute, True, False, False))
    doc.add(Field("version", str(version), True, False, False))
    doc.add(Field.Text("contents", reader))
    indexWriter.addDocument(doc)
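
# A minimal driver for indexReader() above -- a sketch only, assuming the
# old-style PyLucene bindings that expose RAMDirectory, StandardAnalyzer,
# IndexWriter and java.io.StringReader at module level.  The positional
# booleans in Field(name, value, True, False, False) mean "stored, not
# indexed, not tokenized" (what Field.UnIndexed() would build), while
# Field.Text(name, reader) tokenizes and indexes a Reader without storing it.
from PyLucene import RAMDirectory, StandardAnalyzer, IndexWriter, StringReader

class _Owner(object):
    # hypothetical stand-in: the real owner only needs a str16() method
    def str16(self):
        return '0123456789abcdef01'

store = RAMDirectory()
writer = IndexWriter(store, StandardAnalyzer(), True)   # True: create the index
# `indexer` is a hypothetical instance of the (unnamed) class that defines
# indexReader() above.
indexer.indexReader(writer, StringReader(u'text to be indexed'),
                    _Owner(), 'displayName', 1)
writer.close()
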
def addAnnotations(doc, span_id):
    span_id = int(span_id)
    for (a_dict, field_name) in a_dicts:
        if a_dict.has_key(span_id):
            a_id = a_dict[span_id]
            # (a_id, a_text) = a_dict[span_id]
            # print "%s - %s - %s" % (field_name, a_id, span_id)
            doc.add(
                Field(field_name, a_id,
                      Field.Store.YES, Field.Index.TOKENIZED))
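
# addAnnotations() depends on a module-level a_dicts sequence that is built
# elsewhere in the original program.  The shape below is only an illustration
# of what the loop expects -- (dict, field_name) pairs mapping an integer
# span id to the annotation id stored under that field -- with made-up data.
gene_dict = {17: 'EG:1017', 42: 'EG:7157'}        # hypothetical annotations
disease_dict = {17: 'D003924'}                    # hypothetical annotations
a_dicts = [(gene_dict, 'gene'), (disease_dict, 'disease')]

doc = Document()
addAnnotations(doc, '17')   # adds one 'gene' and one 'disease' field to doc
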
def create_feed_document(self, feed):
    doc = Document()
    doc.add(
        Field('id', str(feed.id),
              Field.Store.YES, Field.Index.UN_TOKENIZED))
    doc.add(
        Field('url', feed.xml_url,
              Field.Store.YES, Field.Index.UN_TOKENIZED))
    if feed.channel_link:
        doc.add(
            Field('link', feed.channel_link,
                  Field.Store.YES, Field.Index.UN_TOKENIZED))
    if feed.title:
        doc.add(
            Field('title', feed.title,
                  Field.Store.YES, Field.Index.TOKENIZED))
    if feed.subtitle:
        doc.add(
            Field('subtitle', feed.subtitle,
                  Field.Store.YES, Field.Index.TOKENIZED))
    return doc
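
# Reading the feed documents back out -- a sketch assuming the classic
# Hits-based search API of the Lucene releases these snippets target.
# UN_TOKENIZED fields such as 'url' are matched exactly with a TermQuery;
# TOKENIZED fields such as 'title' go through QueryParser with the same
# analyzer used at indexing time.  `store` stands for whatever Directory the
# feed index was written to.
from PyLucene import IndexSearcher, TermQuery, Term, QueryParser, StandardAnalyzer

searcher = IndexSearcher(store)
hits = searcher.search(TermQuery(Term('url', 'http://example.org/atom.xml')))
print '%d feeds with that exact URL' % hits.length()
hits = searcher.search(QueryParser('title', StandardAnalyzer()).parse('python'))
for i in xrange(hits.length()):
    print hits.doc(i).get('title')
searcher.close()
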
def indexArticle(pmid, text):
    res = p.split(text)
    i = 1
    for r in res[1::2]:
        span_id = r
        span_text = res[i + 1]
        try:
            doc = Document()
            doc.add(
                Field("span_id", span_id,
                      Field.Store.YES, Field.Index.UN_TOKENIZED))
            doc.add(
                Field("pmid", pmid,
                      Field.Store.YES, Field.Index.UN_TOKENIZED))
            doc.add(
                Field("text", span_text,
                      Field.Store.YES, Field.Index.TOKENIZED))
            addAnnotations(doc, span_id)
            writer.addDocument(doc)
        except Exception, e:
            sys.stderr.write("error: %s pmid: %s span_id: %s\n"
                             % (e, pmid, span_id))
        i += 2
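
# indexArticle() relies on two module-level objects created elsewhere:
# `writer`, an open IndexWriter, and `p`, a compiled regex whose single
# capturing group is the span id, so that p.split(text) alternates
# [preamble, id1, text1, id2, text2, ...].  The marker format below is purely
# hypothetical -- the real pattern is not shown in the snippet.
import re

p = re.compile(r'<span id="(\d+)">')
sample = 'head<span id="1">first sentence <span id="2">second sentence'
print p.split(sample)
# -> ['head', '1', 'first sentence ', '2', 'second sentence']
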
def create_entry_documents(self, feed):
    docs = []
    for entry in feed.get_entries():
        try:
            doc = Document()
            id = '%s:%s' % (feed.xml_url, entry.get('id', None))
            doc.add(
                Field('id', id,
                      Field.Store.YES, Field.Index.UN_TOKENIZED))
            doc.add(
                Field('feed_url', feed.xml_url,
                      Field.Store.YES, Field.Index.UN_TOKENIZED))
            if entry.get('title', None):
                doc.add(
                    Field('title', entry['title'],
                          Field.Store.YES, Field.Index.TOKENIZED))
            if entry.get('summary', None):
                doc.add(
                    Field('summary', entry['summary'],
                          Field.Store.YES, Field.Index.TOKENIZED))
            if entry.get('link', None):
                doc.add(
                    Field('link', entry['link'],
                          Field.Store.YES, Field.Index.UN_TOKENIZED))
            try:
                updated = parser.parse(entry.get('updated', None),
                                       ignoretz=True)
                doc.add(
                    Field('updated', updated.isoformat(' '),
                          Field.Store.YES, Field.Index.NO))
            except:
                pass
            try:
                doc.add(
                    Field('pickle', pickle.dumps(entry),
                          Field.Store.YES, Field.Index.NO))
            except Exception, e:
                logging.error('Unable to store pickled entry: %s' % e)
            docs.append(doc)
        except Exception, e:
            logging.error(e)
    return docs
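
# Indexing one feed with the two builders above -- a sketch in which
# `indexer` is a hypothetical instance of the class they belong to, `feed` a
# parsed feed object, and `writer` an IndexWriter opened as in the earlier
# RAMDirectory sketch.
writer.addDocument(indexer.create_feed_document(feed))
for entry_doc in indexer.create_entry_documents(feed):
    writer.addDocument(entry_doc)
writer.optimize()   # merge segments before closing; optional
writer.close()
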
def __init__( self, msg ):
    Document.__init__( self )
    sender = msg.getheader('From')
    self.add( Field.Text( 'from', sender ) )
    subject = msg.getheader( 'Subject' )
    self.add( Field.Text( 'subject', subject ) )
    body = msg.fp.read()
    self.add( Field.Text( 'body', body ) )
    id = msg.getheader('Message-ID')
    self.add( Field.Keyword( 'id', id ) )
    date = strftime( '%Y%m%d%H%M%S', strptime(msg.getheader('Date')) )
    self.add( Field.Keyword( 'date', date ) )
    self.add( Field.Text( 'all', sender + subject + body ) )
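
# The constructor above belongs to an (unnamed) Document subclass; the
# MailDocument name below is a hypothetical stand-in.  It is fed an
# rfc822.Message (Python 2 standard library), which is what supplies
# getheader() and the .fp file object positioned at the message body.
import rfc822

fp = open('message.eml', 'rb')
msg = rfc822.Message(fp)
doc = MailDocument(msg)       # hypothetical name for the subclass above
writer.addDocument(doc)       # writer: an IndexWriter opened elsewhere
fp.close()
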
def _document_node(self, iba_node):
    d = PyLucene.Document()
    # Index the NID
    d.add(
        Field(COLUMN_NID, iba_node.nid,
              Field.Store.YES, Field.Index.UN_TOKENIZED))
    # Index the Names
    for name in iba_node.names:
        d.add(
            Field(COLUMN_NAME, name[0],
                  Field.Store.NO, Field.Index.TOKENIZED))
    # Index the Attributes
    for att in iba_node.attributes:
        # allowing search for nodes having a particular attribute type
        d.add(
            Field(COLUMN_ATTRIBUTE_TYPE_NID, att.type,
                  Field.Store.NO, Field.Index.TOKENIZED))
        # allowing the search of nodes with any attribute having a particular value
        d.add(
            Field(COLUMN_ATTRIBUTE_VALUE, att.value,
                  Field.Store.NO, Field.Index.TOKENIZED))
        # allowing the search of nodes having a particular attribute with a particular value
        d.add(
            Field(COLUMN_ATTRIBUTE_TYPE_NID, att.value,
                  Field.Store.NO, Field.Index.TOKENIZED))
    # Index the Statements
    for stat in iba_node.statements:
        for att in stat.attributes:
            # allowing the search of nodes having any predicate with the specified attribute type
            d.add(
                Field(COLUMN_PREDICATE_NID + COLUMN_ATTRIBUTE_TYPE_NID, att.type,
                      Field.Store.NO, Field.Index.TOKENIZED))
            # allowing the search of nodes having the specified predicate with the specified attribute type of any value
            d.add(
                Field(stat.predicate + COLUMN_ATTRIBUTE_TYPE_NID, att.type,
                      Field.Store.NO, Field.Index.TOKENIZED))
            # allowing the search of nodes having the specified predicate with any attribute type and any value
            d.add(
                Field(COLUMN_PREDICATE_NID, stat.predicate,
                      Field.Store.NO, Field.Index.TOKENIZED))
            # allowing the search of nodes having any predicate with any attribute type of the specified value
            d.add(
                Field(COLUMN_PREDICATE_NID + COLUMN_ATTRIBUTE_TYPE_NID, att.value,
                      Field.Store.NO, Field.Index.TOKENIZED))
            # allowing the search of nodes having any predicate with the specified attribute type and value
            d.add(
                Field(COLUMN_PREDICATE_NID + att.type, att.value,
                      Field.Store.NO, Field.Index.TOKENIZED))
            # allowing the search of nodes having the specified predicate with any attribute type and the specified value
            d.add(
                Field(stat.predicate + COLUMN_ATTRIBUTE_TYPE_NID, att.value,
                      Field.Store.NO, Field.Index.TOKENIZED))
            # allowing the search of nodes having a specified predicate and attribute type and value
            d.add(
                Field(stat.predicate + att.type, att.value,
                      Field.Store.NO, Field.Index.TOKENIZED))
    return d
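
# Querying the composite field names built above -- a sketch with made-up
# nid strings; the COLUMN_* constants come from the original module.  These
# fields are TOKENIZED and not stored, so a TermQuery only matches a single
# analyzed token; multi-word values would need QueryParser instead.
from PyLucene import IndexSearcher, TermQuery, Term

searcher = IndexSearcher(store)   # store: the Directory this index lives in
# nodes whose predicate 'nid42' carries an attribute of type 'nid7' with value 'red'
hits = searcher.search(TermQuery(Term('nid42' + 'nid7', 'red')))
print '%d matching nodes' % hits.length()
searcher.close()
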