Example #1
    def indexDocs(self, url, writer):
        type1 = FieldType()
        type1.setIndexed(True)
        type1.setStored(True)
        type1.setTokenized(False)
        type1.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS)
        
        type2 = FieldType()
        type2.setIndexed(True)
        type2.setStored(True)
        type2.setTokenized(True)
        type2.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS)
        
        # Read Feeds
        feeds = feedparser.parse(url)

        for item in feeds["entries"]:
            print "adding", item["title"] 
            try:
                link = item["link"] 
                contents = item["description"].encode("utf-8")
                contents = re.sub('<[^<]+?>', '', ''.join(contents))
                title = item["title"]
                doc = Document()
                doc.add(Field("url", link, type1))
                doc.add(Field("title", title, type1))
                if len(contents) > 0:
                    doc.add(Field("contents", contents, type2))
                else:
                    print "warning: no content in %s" % item["title"] 
                writer.addDocument(doc)
            except Exception, e:
                 print "Failed in indexDocs:", e
Example #2
    def indexDocs(self, root, writer):

        t1 = FieldType()
        t1.setIndexed(True)
        t1.setStored(True)
        t1.setTokenized(False)
        t1.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS)

        t2 = FieldType()
        t2.setIndexed(True)
        t2.setStored(False)
        t2.setTokenized(True)
        t2.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS)

        for root, dirnames, filenames in os.walk(root):
            for filename in filenames:
                if not filename.endswith('.txt'):
                    continue
                print "adding", filename
                try:
                    path = os.path.join(root, filename)
                    file = open(path)
                    contents = unicode(file.read(), 'gbk')
                    file.close()
                    doc = Document()
                    doc.add(Field("name", filename, t1))
                    doc.add(Field("path", root, t1))
                    if len(contents) > 0:
                        doc.add(Field("contents", contents, t2))
                    else:
                        print "warning: no content in %s" % filename
                    writer.addDocument(doc)
                except Exception, e:
                    print "Failed in indexDocs:", e
Example #3
    def indexDocs(self, root, writer):

        t1 = FieldType()
        t1.setIndexed(False)
        t1.setStored(True)
        t1.setTokenized(False)

        t3 = FieldType()
        t3.setIndexed(True)
        t3.setStored(False)
        t3.setTokenized(True)  # tokenize with the analyzer configured in advance; here it splits on whitespace
        t3.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS)

        total=0
        file = open(root,"r")
        for line in file.readlines():
            try:
                imgurl, itemurl, content = line.split('\t')
                total+=1
                print total
                print "adding", content
                contents = ' '.join(jieba.cut(content))
                doc = Document()
                doc.add(Field("imgurl", imgurl, t1))
                doc.add(Field("itemurl", itemurl, t1))
                doc.add(Field("title", content, t1))
                doc.add(Field("contents",contents,t3))
                writer.addDocument(doc)
            except Exception, e:
                    print "Failed in indexDocs:", e
Example #4
    def index_docs(self, tweets, writer):
        t1 = FieldType()
        t1.setIndexed(True)
        t1.setStored(True)
        t1.setTokenized(True)
        t1.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS)
        t1.setStoreTermVectors(True)
        t1.setStoreTermVectorOffsets(True)

        # add each tweet to the index
        for tweet in tweets:
            try:
                # strip out URLs because they provide false index matches
                contents = []
                for word in tweet[1].text.split():
                    if word.startswith("http://") or word.startswith("https://"):
                        continue
                    contents.append(word)
                contents = " ".join(contents)

                if len(contents) == 0: continue

                doc = Document()
                doc.add(Field("contents", contents, t1))
                writer.addDocument(doc)
            except Exception, e:
                print "Failed in index_docs:", e
Example #5
    def indexDocs(self, root, writer):
        t1 = FieldType()
        t1.setIndexed(True)
        t1.setStored(True)
        t1.setTokenized(True)
        t1.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS)

        t2 = FieldType()
        t2.setIndexed(True)
        t2.setStored(False)
        t2.setTokenized(True)
        t2.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS)

        for root, dirnames, filenames in os.walk(root):
            # traverse through the doc directory
            for filename in filenames:
                #	if not filename.endswith('.cdc'):
                #		continue
                try:
                    # only add the filename and path for indexing
                    path = os.path.join(root, filename)
                    print "adding file : ", path
                    file = open(path)
                    contents = unicode(file.read(), 'utf-8')
                    file.close()
                    doc = Document()
                    doc.add(Field("name", filename, t1))
                    doc.add(Field("path", root, t1))
                    if len(contents) > 0:
                        doc.add(Field("contents", contents, t2))
                    else:
                        print "warning: no content in ", filename
                    writer.addDocument(doc)
                except Exception, e:
                    print "failed in indexDocs:", e
Example #6
	def index(self, root):

		t = FieldType()
		t.setIndexed(True)
		t.setStored(True)
		t.setTokenized(True)
		t.setStoreTermVectors(True)
		
		for path, dirs, files in os.walk(root):
			
			for file in files:
				
				filePath = os.path.join(path, file)
				fd = open(filePath)
				content = unicode(fd.read(), 'iso-8859-1')
				fd.close()
				
				doc = Document()
				doc.add(Field('name', file, StringField.TYPE_STORED))

				parent = os.path.split(path)[1]
				doc.add(Field('parent', parent, StringField.TYPE_STORED))

				if len(content) > 0:
					doc.add(Field('content', content, t))

				print 'Indexing %s' % file
				self.mWriter.addDocument(doc)

		self.mWriter.commit()
		self.mWriter.close()
Example #7
    def indexDocs(self, root, writer):

        t1 = FieldType()
        t1.setIndexed(True)
        t1.setStored(True)
        t1.setTokenized(False)
        t1.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS)

        t2 = FieldType()
        t2.setIndexed(True)
        t2.setStored(False)
        t2.setTokenized(True)
        t2.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS)

        index_file = open("index.txt", 'r')
        for line in index_file.readlines():

            try:
                src = line.strip().split('\t')[0]
                filename = line.strip().split('\t')[1]
                tag = line.strip().split('\t')[2]
                path = os.path.join(root, filename)

                doc = Document()
                doc.add(Field("name", filename, t1))
                doc.add(Field("path", root, t1))
                doc.add(Field("src", src, t1))

                if len(tag) > 0:
                    doc.add(Field("tag", tag, t2))
                else:
                    print "warning: no tag in %s" % filename
                writer.addDocument(doc)
            except Exception, e:
                print "Failed in indexDocs:", e
Example #8
    def indexDocs(self, root, writer):

        t1 = FieldType()
        t1.setIndexed(True)
        t1.setStored(True)
        t1.setTokenized(False)
        t1.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS)
        
        t2 = FieldType()
        t2.setIndexed(True)
        t2.setStored(False)
        t2.setTokenized(True)
        t2.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS)
        
        for root, dirnames, filenames in os.walk(root):
            for filename in filenames:
                if not filename.endswith('.html'):
                    continue
                print "adding", filename
                try:
                    path = os.path.join(root, filename)
                    file = open(path)
                    contents = unicode(file.read(), 'iso-8859-1')
                    file.close()
                    doc = Document()
                    doc.add(Field("name", filename, t1))
                    doc.add(Field("path", root, t1))
                    if len(contents) > 0:
                        doc.add(Field("contents", contents, t2))
                    else:
                        print "warning: no content in %s" % filename
                    writer.addDocument(doc)
                except Exception, e:
                    print "Failed in indexDocs:", e
Example #9
    def indexDocs(self, img_url, toi, tid):

        try:
            t1 = FieldType()
            t1.setIndexed(True)
            t1.setStored(True)
            t1.setTokenized(True)
            t1.setIndexOptions(
                FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS)

            print("Adding", img_url)
            name = "Pictures/1.jpg"
            conn = urllib.urlopen(img_url)
            f = open(name, 'wb')
            f.write(conn.read())
            f.close()
            img = cv2.imread(name)
            sdf = img_search_color(img)
            storeDir = 'Picture_new/' + sdf.strs
            if not os.path.exists(storeDir):
                os.mkdir(storeDir)
            cv2.imwrite(storeDir + '/' + str(toi) + '___' + str(tid) + '.jpg',
                        img)
            '''storeDir2 = 'Picture_user/'+str(tid)
			if not os.path.exists(storeDir2):
				n = 0
				os.mkdir(storeDir2)
			else :
				n = len(os.listdir(storeDir2))
			cv2.imwrite(storeDir2+'/'+str(toi)+ '_'+ str(n) + '.jpg',img)'''
        except Exception, e:
            print("Failed in indexDocs:", e)
Example #10
    def index_docs(self, train_set, writer):

        t1 = FieldType()
        t1.setIndexed(True)
        t1.setStored(True)
        t1.setTokenized(False)
        t1.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS)

        t2 = FieldType()
        t2.setIndexed(True)
        t2.setStored(True)
        t2.setTokenized(True)
        t2.setStoreTermVectorOffsets(True)
        t2.setStoreTermVectorPayloads(True)
        t2.setStoreTermVectorPositions(True)
        t2.setStoreTermVectors(True)
        t2.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS)

        for ii in train_set:
            doc = Document()
            doc.add(Field("answer", ii['Answer'], t1))
            doc.add(Field("qid", ii['Question ID'], t1))
            doc.add(Field("category", ii['category'], t1))
            doc.add(Field("position", ii['Sentence Position'], t1))
            doc.add(Field("question", ii['Question Text'], t2))
            doc.add(Field("wiki_plain",
                          self.wiki_reader.get_text(ii['Answer']), t2))
            writer.addDocument(doc)
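
Because t2 stores term vectors with positions, payloads, and offsets, they can be read back per document. A rough sketch against the Lucene 4.x reader API used throughout these examples; the index path, doc ID, and field name are illustrative.

from java.io import File
from org.apache.lucene.index import DirectoryReader
from org.apache.lucene.store import SimpleFSDirectory

reader = DirectoryReader.open(SimpleFSDirectory(File("index")))
terms = reader.getTermVector(0, "question")  # term vector of doc 0's "question" field
if terms is not None:
    termsEnum = terms.iterator(None)         # 4.x signature: iterator(reuse)
    while termsEnum.next() is not None:
        print termsEnum.term().utf8ToString(), termsEnum.totalTermFreq()
reader.close()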
Example #11
 def indexDocs(self, root, writer):
     t1 = FieldType() # for short items, e.g. file name.
     t1.setIndexed(True)
     t1.setStored(True)
     t1.setTokenized(False)
     t1.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS) # or DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS
     
     t2 = FieldType() # for content
     t2.setIndexed(True)
     t2.setStored(False) # don't store the original text
     t2.setTokenized(True)
     t2.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS)
     
     for root, dirnames, filenames in os.walk(root):
         for filename in filenames:
             print "adding", filename
             try:
                 path = os.path.join(root, filename)
                 file = open(path)
                 contents = unicode(file.read(), 'iso-8859-1')
                 file.close()
                 doc = Document()
                 doc.add(Field("name", filename, t1))
                 doc.add(Field("path", root, t1))
                 if len(contents) > 0:
                     doc.add(Field("contents", contents, t2))
                 else:
                     print "warning: no content in %s" % filename
                 writer.addDocument(doc)
             except Exception, e:
                 print "Failed in indexDocs:", e
Example #12
def _createNoTermsFrequencyFieldType():
    f = FieldType()
    f.setIndexed(True)
    f.setTokenized(True)
    f.setOmitNorms(True)
    f.setIndexOptions(FieldInfo.IndexOptions.DOCS_ONLY)
    f.freeze()
    return f
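
A brief usage sketch for the factory above. Because the type is frozen, one shared instance is safe across fields; the field name and the `writer` are assumptions for illustration.

from org.apache.lucene.document import Document, Field

NO_TF_TYPE = _createNoTermsFrequencyFieldType()

doc = Document()
# DOCS_ONLY postings: terms match, but scoring sees no term frequencies
doc.add(Field("category", "sports news", NO_TF_TYPE))
writer.addDocument(doc)  # writer: an already-configured IndexWriter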
Example #13
def _createNoTermsFrequencyFieldType():
    f = FieldType()
    f.setIndexed(True)
    f.setTokenized(True)
    f.setOmitNorms(True)
    f.setIndexOptions(FieldInfo.IndexOptions.DOCS_ONLY)
    f.freeze()
    return f
Example #14
    def indexDocs(self, root, indextxt, writer):

        t1 = FieldType()
        t1.setIndexed(True)  # t1: indexed, stored, tokenized
        t1.setStored(True)
        t1.setTokenized(True)
        t1.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS)

        t2 = FieldType()  # t2: not indexed, stored, tokenized
        t2.setIndexed(False)
        t2.setStored(True)
        t2.setTokenized(True)
        t2.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS)

        for root, dirnames, filenames in os.walk(root):
            for filename in filenames:

                print "adding", filename
                try:
                    path = os.path.join(root, filename)
                    file = open(path)
                    content = unicode(file.read(), 'utf-8')
                    content = content.encode('utf-8')
                    list1 = content.split('\n')
                    file.close()

                    doc = Document()

                    # read the fields saved in the file and store them in the doc
                    url = list1[0]
                    print('url : ' + url)
                    doc.add(Field("url", url, t1))

                    name = list1[1]
                    print('name : ' + name)
                    doc.add(Field("name", name, t1))

                    collectnum = list1[2]
                    print('collect_num : ' + collectnum)
                    doc.add(Field("collect_num", collectnum, t2))

                    img_url = list1[3]
                    print('img_url : ' + img_url)
                    doc.add(Field("img_url", img_url, t2))

                    zhuliao = list1[4]
                    print(zhuliao)
                    doc.add(Field("zhuliao", zhuliao, t1))

                    zuofa = list1[5]
                    zuofa = '\n'.join(zuofa.split('\t'))
                    print('zuofa : ' + zuofa)
                    doc.add(Field("zuofa", zuofa, t2))

                    writer.addDocument(doc)
                except Exception, e:
                    print "Failed in indexDocs:", e
Example #15
    def indexDocs(self, root, writer):

        # t1 is used for filenames and t2 is used for contents
        t1 = FieldType()
        t1.setIndexed(True)
        t1.setStored(True)
        t1.setTokenized(False)
        t1.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS)

        t2 = FieldType()
        t2.setIndexed(True)
        t2.setStored(False)
        t2.setTokenized(True)
        t2.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS)

        for root, dirnames, filenames in os.walk(root):
            for filename in filenames:
                # We index only certain file types
                if not (filename.endswith('.txt') or filename.endswith('.pdf') or filename.endswith('.xml') or filename.endswith('.doc') or filename.endswith('.odt')):
                    continue
                try:
                    file_path = os.path.join(root, filename)
                    outfile_path = file_path

                    # First convert PDF and DOC files to text
                    if filename.endswith('.pdf'):
                        outfile = filename.replace('.pdf', '.txt')
                        outfile_path = os.path.join(root, outfile)
                        cmd = 'pdftotext ' + '-layout ' + "'"+ file_path +  "'" + ' ' + "'" + outfile_path + "'"
                        subprocess.check_output(cmd, shell=True)
                        file_path = outfile_path
                    elif filename.endswith('.doc'):
                        outfile = filename.replace('.doc', '.txt')
                        outfile_path = os.path.join(root, outfile)
                        cmd = 'antiword ' +  file_path + ' >> ' + outfile_path
                        subprocess.check_output(cmd, shell=True)
                        file_path = outfile_path
                    elif filename.endswith('.odt'):
                        outfile = filename.replace('.odt', '.txt')
                        outfile_path = os.path.join(root, outfile)
                        cmd = 'odttotext ' + '-layout ' + "'"+ file_path +  "'" + ' ' + "'" + outfile_path + "'"
                        subprocess.check_output(cmd, shell=True)
                        file_path = outfile_path

                    file = open(file_path)
                    contents = unicode(file.read(), 'iso-8859-1')
                    file.close()
                    doc = Document()
                    doc.add(Field("name", filename, t1))
                    doc.add(Field("path", root, t1))
                    if len(contents) > 0:
                        doc.add(Field("contents", contents, t2))
                    else:
                        logging.debug('warning: no content in %s', filename)
                    writer.addDocument(doc)
                except Exception, e:
                    logging.debug('Failed in indexDocs: %s', e)
Example #16
class LuceneDocumentField(object):
    """Internal handler class for possible field types"""

    def __init__(self):
        """Init possible field types"""

        # FIELD_ID: stored, indexed, non-tokenized
        self.field_id = FieldType()
        self.field_id.setIndexed(True)
        self.field_id.setStored(True)
        self.field_id.setTokenized(False)

        # FIELD_ID_TV: stored, indexed, not tokenized, with term vectors (without positions)
        # for storing IDs with term vector info
        self.field_id_tv = FieldType()
        self.field_id_tv.setIndexed(True)
        self.field_id_tv.setStored(True)
        self.field_id_tv.setTokenized(False)
        self.field_id_tv.setStoreTermVectors(True)

        # FIELD_TEXT: stored, indexed, tokenized, with positions
        self.field_text = FieldType()
        self.field_text.setIndexed(True)
        self.field_text.setStored(True)
        self.field_text.setTokenized(True)

        # FIELD_TEXT_TV: stored, indexed, tokenized, with term vectors (without positions)
        self.field_text_tv = FieldType()
        self.field_text_tv.setIndexed(True)
        self.field_text_tv.setStored(True)
        self.field_text_tv.setTokenized(True)
        self.field_text_tv.setStoreTermVectors(True)

        # FIELD_TEXT_TVP: stored, indexed, tokenized, with term vectors and positions
        # (but no character offsets)
        self.field_text_tvp = FieldType()
        self.field_text_tvp.setIndexed(True)
        self.field_text_tvp.setStored(True)
        self.field_text_tvp.setTokenized(True)
        self.field_text_tvp.setStoreTermVectors(True)
        self.field_text_tvp.setStoreTermVectorPositions(True)

    def get_field(self, type):
        """Get Lucene FieldType object for the corresponding internal FIELDTYPE_ value"""
        if type == Lucene.FIELDTYPE_ID:
            return self.field_id
        elif type == Lucene.FIELDTYPE_ID_TV:
            return self.field_id_tv
        elif type == Lucene.FIELDTYPE_TEXT:
            return self.field_text
        elif type == Lucene.FIELDTYPE_TEXT_TV:
            return self.field_text_tv
        elif type == Lucene.FIELDTYPE_TEXT_TVP:
            return self.field_text_tvp
        else:
            raise Exception("Unknown field type")
Example #17
    def indexDocs_playlist(self, writer):

        t1 = FieldType()
        t1.setIndexed(False)
        t1.setStored(True)
        t1.setTokenized(False)

        t2 = FieldType()
        t2.setIndexed(True)
        t2.setStored(True)
        t2.setTokenized(True)
        t2.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS)

        # playlist
        success = 0
        fail = 0
        noinfo = 0

        playlists = open('data/playlist_details2.db', "r")
        for line in playlists.readlines():
            if len(line) < 20:
                noinfo += 1
                continue

            information = line.split(',')
            try:
                playID = information[0]
                playname = ' '.join(information[1:-8])
                author, playImage, tags, songIDs, sharecount, playcount, subscribedcount, commentcount = information[
                    -8:]
            except Exception, e:
                fail += 1
                print "fail"
                continue

            playname = ' '.join(jieba.cut(playname))
            author = ' '.join(jieba.cut(author))
            tags = ' '.join(jieba.cut(tags))
            # playname = ' '.join(pynlpir.segment(playname, pos_tagging=False))
            # author = ' '.join(pynlpir.segment(author, pos_tagging=False))
            tags = tags.replace("|", " ")

            doc = Document()
            doc.add(Field("ID", playID, t2))
            doc.add(Field("name", playname, t2))
            doc.add(Field("author", author, t2))
            doc.add(Field("image", playImage, t1))
            doc.add(Field("tags", tags, t2))
            doc.add(Field("songIDs", songIDs, t1))
            doc.add(Field("sharecount", sharecount, t1))
            doc.add(Field("playcount", playcount, t1))
            doc.add(Field("subscribedcount", subscribedcount, t1))
            doc.add(Field("commentcount", commentcount, t1))
            writer.addDocument(doc)
            print "歌单", playname, "成功添加"
            success += 1
Example #18
class LuceneDocumentField(object):
    """Internal handler class for possible field types"""
    def __init__(self):
        """Init possible field types"""

        # FIELD_ID: stored, indexed, non-tokenized
        self.field_id = FieldType()
        self.field_id.setIndexed(True)
        self.field_id.setStored(True)
        self.field_id.setTokenized(False)

        # FIELD_ID_TV: stored, indexed, not tokenized, with term vectors (without positions)
        # for storing IDs with term vector info
        self.field_id_tv = FieldType()
        self.field_id_tv.setIndexed(True)
        self.field_id_tv.setStored(True)
        self.field_id_tv.setTokenized(False)
        self.field_id_tv.setStoreTermVectors(True)

        # FIELD_TEXT: stored, indexed, tokenized, with positions
        self.field_text = FieldType()
        self.field_text.setIndexed(True)
        self.field_text.setStored(True)
        self.field_text.setTokenized(True)

        # FIELD_TEXT_TV: stored, indexed, tokenized, with term vectors (without positions)
        self.field_text_tv = FieldType()
        self.field_text_tv.setIndexed(True)
        self.field_text_tv.setStored(True)
        self.field_text_tv.setTokenized(True)
        self.field_text_tv.setStoreTermVectors(True)

        # FIELD_TEXT_TVP: stored, indexed, tokenized, with term vectors and positions
        # (but no character offsets)
        self.field_text_tvp = FieldType()
        self.field_text_tvp.setIndexed(True)
        self.field_text_tvp.setStored(True)
        self.field_text_tvp.setTokenized(True)
        self.field_text_tvp.setStoreTermVectors(True)
        self.field_text_tvp.setStoreTermVectorPositions(True)

    def get_field(self, type):
        """Get Lucene FieldType object for the corresponding internal FIELDTYPE_ value"""
        if type == Lucene.FIELDTYPE_ID:
            return self.field_id
        elif type == Lucene.FIELDTYPE_ID_TV:
            return self.field_id_tv
        elif type == Lucene.FIELDTYPE_TEXT:
            return self.field_text
        elif type == Lucene.FIELDTYPE_TEXT_TV:
            return self.field_text_tv
        elif type == Lucene.FIELDTYPE_TEXT_TVP:
            return self.field_text_tvp
        else:
            raise Exception("Unknown field type")
Example #19
class Indexer(object):
    def __init__(self, **kwargs):
        """ Initialize a new instance of the Indexer

        :param output: The output directory of the underlying index
        :param analyzer: The overloaded analyzer to work with
        """
        self.output = kwargs.get("root", "index")
        if not os.path.exists(self.output):
            os.mkdir(self.output)

        self.analyzer = kwargs.get("analyzer", StandardAnalyzer(Version.LUCENE_CURRENT))
        self.analyzer = LimitTokenCountAnalyzer(self.analyzer, 1048576)
        self.config = IndexWriterConfig(Version.LUCENE_CURRENT, self.analyzer)
        self.config.setOpenMode(IndexWriterConfig.OpenMode.CREATE)
        self.store = SimpleFSDirectory(File(self.output))
        self.writer = IndexWriter(self.store, self.config)
        self.create_field_types()

    def index(self, document):
        """ Given a new document, add it to the index.

        :param document: The document to add to the indexer
        """
        try:
            self.writer.addDocument(document)
        except Exception:
            logger.exception("Failed to index the supplied document")

    def shutdown(self):
        """ Shutdown the currently processing indexer.
        """
        try:
            # self.writer.optimize()
            self.writer.close()
        except Exception:
            logger.exception("Failed to shutdown the indexer correctly")

    def create_field_types(self):
        """ Create the field types that will be used to specify
        what actions lucene should take on the various fields
        supplied to index.
        """
        self.field_clean = FieldType()
        self.field_clean.setIndexed(True)
        self.field_clean.setStored(True)
        self.field_clean.setTokenized(False)
        self.field_clean.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS)

        self.field_dirty = FieldType()
        self.field_dirty.setIndexed(True)
        self.field_dirty.setStored(False)
        self.field_dirty.setTokenized(True)
        self.field_dirty.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS)
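
Driving the class could look like the sketch below; the output directory and field names are illustrative, not from the original.

from org.apache.lucene.document import Document, Field

indexer = Indexer(root="my_index")

doc = Document()
doc.add(Field("id", "42", indexer.field_clean))  # stored, untokenized key
doc.add(Field("body", "full text to be searched", indexer.field_dirty))  # tokenized, unstored body
indexer.index(doc)
indexer.shutdown()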
Example #20
    def indexDocs(self, root, writer):

        t1 = FieldType()
        t1.setIndexed(True)
        t1.setStored(True)
        t1.setTokenized(False)
        t1.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS)
        
        t2 = FieldType()
        t2.setIndexed(True)
        t2.setStored(False)
        t2.setTokenized(True)
        t2.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS)

        files = os.listdir(root)

        for filename in files:
            print "adding", filename
            try:
                path = os.path.join(root, filename)
                file = open(path)
                total = file.read()
                totallist=total.split("\n")

                geshou=totallist[0]
                geming=totallist[1]
                zhuanji=totallist[2]
                imgurl=totallist[3]
                liupai=totallist[4]
                shijian=totallist[5]
                jianjie=totallist[6]
                geci=totallist[7]


                contents = geming+geshou+zhuanji+liupai+geci
                seg_result = jieba.cut(contents)
                contents = ' '.join(seg_result)

                doc = Document() 
                doc.add(Field("contents",contents,t2))
                doc.add(Field("geming", geming, t1))
                doc.add(Field("geshou", geshou, t1))
                doc.add(Field("zhuanji", zhuanji, t1))
                doc.add(Field("liupai",liupai,t1))
                doc.add(Field("geci",geci,t1))
                doc.add(Field("imgurl", imgurl, t1))
                doc.add(Field("shijian",shijian,t1))
                doc.add(Field("jianjie",jianjie,t1))
                    
                writer.addDocument(doc)
                file.close()
            except Exception, e:
                print "Failed in indexDocs:", e
Example #21
    def testAdd(self, goodname, salenum, price, shopname, url, picturename, comment, historyprice):
        analyzer = WhitespaceAnalyzer(Version.LUCENE_CURRENT)
        analyzer = LimitTokenCountAnalyzer(analyzer, 1048576)
        config = IndexWriterConfig(Version.LUCENE_CURRENT, analyzer)
        config.setOpenMode(IndexWriterConfig.OpenMode.CREATE_OR_APPEND)
        writer = IndexWriter(self.dir, config)
        # True: build a fresh index; False: build an incremental index

        noIndexedString = FieldType()
        noIndexedString.setTokenized(False)
        noIndexedString.setIndexed(False)
        noIndexedString.setStored(True)

        try:
            print "adding", goodname

            goodname_s = unicode(goodname, 'utf8')
            seg_list_good = jieba.cut(goodname_s, cut_all=False)
            goodname_s = " ".join(seg_list_good)  # 默认模式

            shopname_s = unicode(shopname, 'utf8')
            seg_list_shop = jieba.cut(shopname_s, cut_all=False)
            shopname_s = " ".join(seg_list_shop)  # 默认模式

            shopnameField = Field("shopName", shopname, noIndexedString)
            shopnameField_s = TextField("shopName_s", shopname_s, Field.Store.NO)
            goodnameField = Field("goodName", goodname, noIndexedString)
            goodnameField_s = TextField("goodName_s", goodname_s, Field.Store.NO)
            salenumField = IntField("saleNum", salenum, Field.Store.YES)
            priceField = DoubleField("price", price, Field.Store.YES)
            urlField = Field("url", url, noIndexedString)
            pictureField = StringField("pictureName", picturename, Field.Store.YES)
            commentField = Field("comments", comment, noIndexedString)
            historyPriceField = Field("historyPrice", historyprice, noIndexedString)

            doc = Document()
            doc.add(shopnameField)
            doc.add(shopnameField_s)
            doc.add(goodnameField)
            doc.add(goodnameField_s)
            doc.add(salenumField)
            doc.add(priceField)
            doc.add(urlField)
            doc.add(pictureField)
            doc.add(commentField)
            doc.add(historyPriceField)

            writer.addDocument(doc)
        except Exception, e:
            print "Failed in indexDocs:", e
Example #22
    def indexDocs(self, root, writer):

        t1 = FieldType()
        t1.setIndexed(True)
        t1.setStored(True)
        t1.setTokenized(False)
        t1.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS)

        t2 = FieldType()
        t2.setIndexed(True)
        t2.setStored(False)
        t2.setTokenized(True)
        t2.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS)

        for root, dirnames, filenames in os.walk(root):
            for filename in filenames:
                # if not filename.endswith('.txt'):
                # continue
                print "adding", filename
                try:
                    path = os.path.join(root, filename)
                    pwd = os.path.join(os.getcwd() + '/' + root, filename)
                    file = open(path)
                    url = file.readline()
                    title = file.readline()
                    tag = file.readline()
                    imgurl = file.readline()
                    price = file.readline()
                    wellrate = file.readline()
                    comment = file.readline()

                    contents = unicode(file.read(), 'utf8')
                    file.close()
                    doc = Document()
                    doc.add(Field('url', url, t1))
                    doc.add(Field('title', title, t1))
                    doc.add(Field('imgurl', imgurl, t1))
                    doc.add(Field('price', price, t1))
                    doc.add(Field('wellrate', wellrate, t1))
                    doc.add(Field('comment', comment, t1))

                    if len(tag) > 2:
                        doc.add(Field('tag', tag, t2))
                    else:
                        doc.add(Field('tag', ' ', t2))
                    #doc.add(Field('comment', comment, t1))

                    writer.addDocument(doc)
                except Exception, e:
                    print "Failed in indexDocs:", e
Example #23
    def indexDocs(self, root, writer):

        t1 = FieldType()  #t1 is used in path and URL fields
        t1.setIndexed(False)
        t1.setStored(True)
        t1.setTokenized(False)

        t2 = FieldType()  #t2 is used to index contents
        t2.setIndexed(True)
        t2.setStored(False)
        t2.setTokenized(True)
        t2.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS)

        t3 = FieldType()  #t3 is used to index titles
        t3.setIndexed(True)
        t3.setStored(True)
        t3.setTokenized(True)
        t3.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS)

        t4 = FieldType()  #t4 is used to index sites
        t4.setIndexed(True)
        t4.setStored(True)
        t4.setTokenized(False)
        t4.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS)

        indextxt = open(self.filedir, 'r')

        while True:
            t = indextxt.readline()
            if (len(t) == 0):
                indextxt.close()
                return
            t = t.split()
            filename = t[1]
            URL = t[0]
            #        for root, dirnames, filenames in os.walk(root):
            #            for filename in filenames:
            print "updating", filename
            try:
                writer.deleteDocuments(Term("name", filename))
                path = os.path.join(root, filename)
                file = open(path)
                title = file.readline()
                print title
                contents = unicode(file.read())
                file.close()
                doc = Document()
                doc.add(Field("name", filename, t1))
                doc.add(Field("path", path, t1))
                doc.add(Field("url", URL, t1))
                doc.add(Field("title", title, t3))
                doc.add(Field("site", get_site(URL), t4))
                if len(contents) > 0:
                    doc.add(Field("contents", contents, t2))
                else:
                    print "warning: no content in %s" % filename
                writer.addDocument(doc)
            except Exception, e:
                print "Failed in indexDocs:", e
Example #24
    def indexDocs(self, dialog, root, writer):

        t1 = FieldType()
        t1.setIndexed(True)
        t1.setStored(True)
        t1.setTokenized(False)
        t1.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS)

        t2 = FieldType()
        t2.setIndexed(True)
        t2.setStored(False)
        t2.setTokenized(True)
        t2.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS)

        f = open(dialog)
        lines = f.readlines()
        for line in lines:
            info = line.split('\t')
            url = info[0]
            filename = info[1]
            title = info[2]
            print "adding", filename
            try:
                path = os.path.join(root, filename)
                file = open(path)
                contents = unicode(file.read(), 'utf-8')
                file.close()
                doc = Document()
                doc.add(Field('name', filename, t1))
                doc.add(Field('path', path, t1))
                doc.add(
                    Field('url', url, Field.Store.YES,
                          Field.Index.NOT_ANALYZED))
                if len(contents) > 0:
                    soup = BeautifulSoup(contents)
                    try:
                        title = soup.title.text
                        doc.add(
                            Field('title', title, Field.Store.YES,
                                  Field.Index.ANALYZED))
                    except Exception, e:
                        doc.add(
                            Field('title', 'none', Field.Store.YES,
                                  Field.Index.ANALYZED))
                    contents = ''.join(soup.findAll(text=True))
                    doc.add(Field("contents", analysis(contents), t2))
                else:
                    print "warning: no content in %s" % filename
                writer.addDocument(doc)
            except Exception, e:
                print "Failed in indexDocs:", e
Example #25
    def indexDocs(self, root, writer):

        t1 = FieldType()
        t1.setIndexed(False)
        t1.setStored(True)
        t1.setTokenized(False)

        t2 = FieldType()
        t2.setIndexed(True)
        t2.setStored(False)
        t2.setTokenized(True)
        t2.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS)
        path="./shi/poet.song.%s.json" %(str(root))
        print path
        with open(path, 'r') as load_f:
            load_dict = json.load(load_f)
        count=0
        for dic in load_dict:
            try:
                print "new item!"
                values = dic.values()
                # print len(values)
                strains = values[0][0].encode('utf-8')
                print "strains:", strains
                paragraphs = values[1][0].encode('utf-8')
                print "paragraphs:", paragraphs
                title = values[2].encode('utf-8')
                print "title:", title
                author = values[3].encode('utf-8')
                print "author:", author
                doc = Document()
                doc.add(Field("strains", strains, t1))
                doc.add(Field("paragraphs_untokened", paragraphs, t1))
                doc.add(Field("title_untokened", title, t1))
                doc.add(Field("author", author, t1))

                seg_list1 = jieba.cut(paragraphs)
                paragraphs_tokened = " ".join(seg_list1)
                seg_list2=jieba.cut(title)
                title_tokened = " ".join(seg_list2)
                doc.add(Field("paragraphs_tokened", paragraphs_tokened, t2))
                doc.add(Field("title_tokened", title, t2))
                writer.addDocument(doc)
                count += 1
                print count
            except:
                pass
Example #26
    def indexDocs(self, indexfile, writer):
        t1 = FieldType()
        t1.setIndexed(False)
        t1.setStored(True)
        t1.setTokenized(False)
        t1.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS)

        t2 = FieldType()
        t2.setIndexed(True)
        t2.setStored(False)
        t2.setTokenized(True)
        t2.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS)

        index_info = open(indexfile)
        for line in index_info.readlines():
            info = line.split('\t', 2)
            "adding", info[0]
            try:
                path = os.path.join(root, info[0])
                file = open(path)
                #contents = unicode(file.read(), 'gbk')
                contents = file.read()
                file.close()

                proto, rest = urllib2.splittype(info[1])
                domain, rest = urllib2.splithost(rest)
                if not domain:
                    domain = "unknown"
                if domain[0:4] == "www.":
                    domain = domain[4:]
                doc = Document()
                doc.add(Field("name", info[0], t1))
                doc.add(Field("path", os.path.abspath(path), t1))
                doc.add(Field("url", info[1], t1))
                doc.add(Field("title", info[2], t1))
                doc.add(
                    Field("site", domain, Field.Store.YES,
                          Field.Index.ANALYZED))

                if len(contents) > 0:
                    doc.add(Field("contents", contents, t2))
                else:
                    print "warning: no content in %s" % info[0]
                writer.addDocument(doc)
            except Exception, e:
                print "Failed in indexDocs:", e
Example #27
    def setUp(self):
        super(Test_Bug1842, self).setUp()

        self.analyzer = StandardAnalyzer(Version.LUCENE_CURRENT)
        
        w1 = self.getWriter(analyzer=self.analyzer)
        doc1 = Document()

        ftype = FieldType()
        ftype.setStored(False)
        ftype.setIndexed(True)
        ftype.setStoreTermVectors(True)
        doc1.add(Field("all", "blah blah blah Gesundheit", ftype))
        doc1.add(Field('id', '1', StringField.TYPE_NOT_STORED))

        w1.addDocument(doc1)
        w1.close()
Example #28
    def setUp(self):
        super(Test_Bug1842, self).setUp()

        self.analyzer = StandardAnalyzer(Version.LUCENE_CURRENT)

        w1 = self.getWriter(analyzer=self.analyzer)
        doc1 = Document()

        ftype = FieldType()
        ftype.setStored(False)
        ftype.setIndexed(True)
        ftype.setStoreTermVectors(True)
        doc1.add(Field("all", "blah blah blah Gesundheit", ftype))
        doc1.add(Field('id', '1', StringField.TYPE_NOT_STORED))

        w1.addDocument(doc1)
        w1.close()
Example #29
def index_article(writer, art_id, art_body):
    art_id_field = FieldType()
    art_id_field.setIndexed(True)
    art_id_field.setStored(True)
    art_id_field.setTokenized(False)
    art_id_field.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS)

    art_body_field = FieldType()
    art_body_field.setIndexed(True)
    art_body_field.setStored(True)
    art_body_field.setTokenized(True)
    art_body_field.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS)

    doc = Document()
    doc.add(Field("art_id", str(art_id), art_id_field))
    doc.add(Field("art_body", art_body, art_body_field))

    writer.addDocument(doc)
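
A hypothetical caller for index_article, reusing the writer construction seen elsewhere in these examples; the index path and article bodies are placeholders.

from java.io import File
from org.apache.lucene.analysis.standard import StandardAnalyzer
from org.apache.lucene.index import IndexWriter, IndexWriterConfig
from org.apache.lucene.store import SimpleFSDirectory
from org.apache.lucene.util import Version

writer = IndexWriter(SimpleFSDirectory(File("article_index")),
                     IndexWriterConfig(Version.LUCENE_CURRENT,
                                       StandardAnalyzer(Version.LUCENE_CURRENT)))
for art_id, art_body in [(1, "first article body"), (2, "second article body")]:
    index_article(writer, art_id, art_body)
writer.commit()
writer.close()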
Example #30
    def indexDocs(self, root, writer):

        t1 = FieldType()
        t1.setIndexed(True)
        t1.setStored(True)
        t1.setTokenized(False)
        t1.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS)

        t2 = FieldType()
        t2.setIndexed(True)
        t2.setStored(True)
        t2.setTokenized(True)
        t2.setStoreTermVectorOffsets(True)
        t2.setStoreTermVectorPayloads(True)
        t2.setStoreTermVectorPositions(True)
        t2.setStoreTermVectors(True)
        t2.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS)

        for root, dirnames, filenames in os.walk(root):
            for filename in filenames:
                if not filename.endswith('.json'):
                    continue
                print "adding", filename
                try:
                    path = os.path.join(root, filename)
                    file = open(path)
                    contents = json.load(file)
                    file.close()
                    doc = Document()
                    doc.add(Field("title", contents['title'], t2))
                    doc.add(Field("path", root, t1))
                    doc.add(Field("playStoreURL", contents['playStoreURL'],
                                  t1))
                    doc.add(Field("creator", contents['creator'], t1))
                    extendedInfo = contents['extendedInfo']
                    if len(extendedInfo['description']) > 0:
                        doc.add(
                            Field("description", extendedInfo['description'],
                                  t2))
                    else:
                        print "warning: no description in %s" % filename
                    writer.addDocument(doc)
                except Exception, e:
                    print "Failed in indexDocs:", e
Example #31
class Indexer(object):  # builds the index
    def __init__(self, indexDir, doClear=True, computeLengthNorm=False):
        #         if not jpype.isJVMStarted():
        #         lucene.initVM()
        lucene.getVMEnv().attachCurrentThread()
        self.analyzer = StandardAnalyzer(Version.LUCENE_CURRENT)
        # self.analyzer = LimitTokenCountAnalyzer(self.analyzer, 100678)#is here?
        self.config = IndexWriterConfig(Version.LUCENE_CURRENT, self.analyzer)
        self.config.setRAMBufferSizeMB(256.0)  # auto-flush once the RAM buffer reaches 256 MB
        self.config.setMaxBufferedDocs(10000)  # auto-flush after at most 10000 buffered docs
        if not computeLengthNorm:
            sim = CustomSimilarity()
            self.config.setSimilarity(sim)
        self.path = os.path.join(INDEX_PATH, indexDir)
        # print self.path
        # path.mkdir(self.path)
        #         if doClear:
        #             self.clearExistingIndex()
        self.store = SimpleFSDirectory(File(self.path))
        self.writer = IndexWriter(self.store, self.config)

        self.t1 = FieldType()  # field type t1
        self.t1.setIndexed(True)
        self.t1.setStored(True)
        self.t1.setTokenized(False)
        self.t1.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS)

        self.t2 = FieldType()  # field type t2
        self.t2.setIndexed(True)
        self.t2.setStored(False)
        self.t2.setTokenized(True)
        self.t2.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS)

    def clearExistingIndex(self):  # delete the existing index files
        indexdir = self.path
        for thefile in os.listdir(indexdir):
            filepath = os.path.join(indexdir, thefile)
            try:
                if os.path.isfile(filepath):
                    os.unlink(filepath)
            except Exception, e:
                logger.error("Delete file %s failed: %s", filepath, str(e))
Example #32
    def indexDocs(self, root, writer, p):

        t1 = FieldType()
        t1.setIndexed(True)
        t1.setStored(True)
        t1.setTokenized(False)
        t1.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS)

        t2 = FieldType()
        t2.setIndexed(True)
        t2.setStored(True)
        t2.setTokenized(True)
        t2.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS)

        file0 = open("books_detail.txt")
        dic = {}
        for lines in file0.readlines():
            try:
                tmp = lines.split('\t')
                dic[tmp[0]] = [tmp[1], tmp[2], tmp[4]]
            except:
                continue

        for n, k in dic.items():
            print "adding", n
            try:
                doc = Document()
                n = unicode(n, "utf-8")
                l = ""
                for i in n:
                    l = l + i + " "
                j = " ".join(jieba.cut(n))
                if p == 1:
                    doc.add(Field("name", j, t2))
                    doc.add(Field("path", k[0], t1))
                    doc.add(Field("price", k[1], t1))
                    doc.add(Field("imgsrc", k[2], t1))
                    doc.add(Field("not_seg", l, t2))
                    doc.add(Field("org", n, t1))
                writer.addDocument(doc)
            except Exception, e:
                print "Failed in indexDocs:", e
Example #33
    def indexDocs(self, root, writer):

        t1 = FieldType()
        t1.setIndexed(False)
        t1.setStored(True)
        t1.setTokenized(False)

        t2 = FieldType()
        t2.setIndexed(True)
        t2.setStored(True)
        t2.setTokenized(True)
        t2.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS)

        i = 0

        for j in os.listdir(root):
            filename = root + '/' + j
            print filename
            print "adding", filename
            file = open(filename, 'r')
            doc = Document()
            doc.add(Field('名字', j[:-4], t2))
            while True:
                line = file.readline()
                line = line.split()
                if not line:
                    break

                if len(line) > 1:
                    if line[0] == '标题' or line[0] == '导演' or line[
                            0] == '主演' or line[0] == '类型:':
                        doc.add(Field(line[0], ' '.join(line[1:]), t2))
                    else:
                        doc.add(Field(line[0], ' '.join(line[1:]), t1))
            writer.addDocument(doc)
            file.close()
            i += 1
            print i
Example #34
    def indexDocs(self, root, writer, urlDic):

        t1 = FieldType()
        t1.setIndexed(False)
        t1.setStored(True)
        t1.setTokenized(False)

        t2 = FieldType()
        t2.setIndexed(True)
        t2.setStored(False)
        t2.setTokenized(True)
        t2.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS)

        t3 = FieldType()
        t3.setIndexed(True)
        t3.setStored(True)
        t3.setTokenized(False)

        for root, dirnames, filenames in os.walk(root):
            for filename in filenames:
                if not filename.endswith('.htm') and not filename.endswith(
                        '.html') and not filename.endswith(
                            '.com') and not filename.endswith('.cn'):
                    continue
                print "adding", filename
                try:
                    path = os.path.join(root, filename)

                    url = urlDic[filename]
                    proto, rest = urllib.splittype(url)
                    site, rest = urllib.splithost(rest)

                    file = open(path)
                    contents = file.read()
                    file.close()

                    soup = BeautifulSoup(contents, features='html.parser')
                    title = soup.title.string
                    title = unicode(title).encode('utf-8')
                    title = title.replace("\n", '')

                    contents = soup.get_text().encode('utf-8')
                    seg_list = jieba.cut(contents)
                    contents = " ".join(seg_list)

                    doc = Document()
                    doc.add(Field("name", filename, t1))
                    doc.add(Field("path", path, t1))
                    doc.add(Field("title", title, t1))
                    doc.add(Field("url", url, t1))
                    doc.add(Field("site", site, t3))
                    if len(contents) > 0:
                        doc.add(Field("contents", contents, t2))
                    else:
                        print "warning: no content in %s" % filename
                    writer.addDocument(doc)
                except Exception, e:
                    print "Failed in indexDocs:", e
Example #35
def lazyImport():
    global imported
    if imported:
        return

    from meresco.pylucene import getJVM
    getJVM()

    from java.io import File
    from org.apache.lucene.document import Document, StringField, Field, LongField, FieldType
    from org.apache.lucene.search import IndexSearcher, TermQuery
    from org.apache.lucene.index import DirectoryReader, Term, IndexWriter, IndexWriterConfig
    from org.apache.lucene.store import FSDirectory
    from org.apache.lucene.util import Version
    from org.apache.lucene.analysis.core import WhitespaceAnalyzer

    UNINDEXED_TYPE = FieldType()
    UNINDEXED_TYPE.setIndexed(False)
    UNINDEXED_TYPE.setStored(True)
    UNINDEXED_TYPE.setTokenized(False)

    imported = True
    globals().update(locals())
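
Once lazyImport() has run, the injected names are module globals; a short sketch with an illustrative field name.

lazyImport()

doc = Document()
# UNINDEXED_TYPE: stored for retrieval only, never matched by queries
doc.add(Field("payload", "opaque stored value", UNINDEXED_TYPE))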
Example #36
 def indexDocs(self, root, writer): 
      
     #Create a new FieldType with default properties. 
     t1 = FieldType() 
     t1.setIndexed(True) 
     t1.setStored(True) 
     t1.setTokenized(False)#True if this field's value should be analyzed by the Analyzer. 
     t1.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS) 
      
     #Create a new FieldType with default properties. 
     t2 = FieldType() 
     t2.setIndexed(True) 
     t2.setStored(True) 
     t2.setTokenized(True)#True if this field's value should be analyzed by the Analyzer. 
     t2.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) 
      
     for root, dirnames, filenames in os.walk(root): 
         for filename in filenames: 
             if not filename.endswith('.txt'): 
                 continue 
             print 'adding', filename 
             try: 
                 path = os.path.join(root, filename) 
                 file = open(path) 
                 contents = file.read() 
                 file.close() 
                 doc = Document() 
                 doc.add(Field('name', filename, t1)) 
                 doc.add(Field('path', root, t1)) 
                 if len(contents) > 0: 
                     doc.add(Field('contents', contents, t2)) 
                     print 'length of content is %d'%(len(contents)) 
                 else: 
                     print 'warning: no content in %s' % filename 
                 writer.addDocument(doc) 
             except Exception, e: 
                 print 'Failed in indexDocs:', e 
Example #37
    def indexDocs(self, root, writer):
        global count

        t1 = FieldType()
        t1.setIndexed(False)
        t1.setStored(True)
        t1.setTokenized(False)

        t2 = FieldType()
        t2.setIndexed(True)
        t2.setStored(True)
        t2.setTokenized(True)
        t2.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS)

        for root, dirnames, filenames in os.walk(root):
            for filename in filenames:
                try:
                    data = load(filename)
                    raw_title = data['title']
                    title = ' '.join(jieba.cut_for_search(raw_title))
                    print title
                    print "adding", filename
                    doc = Document()
                    doc.add(Field("raw_title", raw_title, t1))
                    doc.add(Field("filename", filename, t1))
                    doc.add(Field("url", data['url'], t1))
                    doc.add(Field("title", title, t2))
                    doc.add(Field("ctime", str(data['ctime']), t1))
                    doc.add(Field("content", data['content'], t1))
                    for img in data['realimgs']:
                        doc.add(Field("imgs", img[1], t1))
                        doc.add(Field("imgurls", img[0], t1))
                    writer.addDocument(doc)
                    count += 1
                except Exception, e:
                    print "Failed in indexDocs:", e
Example #38
	def indexDocs(self,root,writer):
		t1 = FieldType()
		t1.setIndexed(True)
		t1.setStored(True)
		t1.setTokenized(True)
		t1.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS)

		t2 = FieldType()
		t2.setIndexed(True)
		t2.setStored(False)
		t2.setTokenized(True)
		t2.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS)

		for root, dirnames,filenames in os.walk(root):
			# traverse through the doc directory
			for filename in filenames:
				# only if this file ends with '.c'
				if not filename.endswith('.c'):
					continue
				try:
					# only add the filename and path for indexing
					path = os.path.join(root,filename)
					print "adding file : ",path
					file = open(path)
					contents = unicode(file.read(),'utf-8')
					file.close()
					doc = Document()
					doc.add(Field("name",filename,t1))
					doc.add(Field("path",root,t1))
				#	if len(contents) > 0:
				#		doc.add(Field("contents",contents,t2))
				#	else:
				#		print "warning: no content in ",filename
					writer.addDocument(doc)
				except Exception,e:
					print "failed in indexDocs:",e
Example #39
    def indexDocs(self, root, writer):

        t1 = FieldType() #t1 is used in URL fields
        t1.setIndexed(False)
        t1.setStored(True)
        t1.setTokenized(False)

        t2 = FieldType() #t2 is used to index contents
        t2.setIndexed(True)
        t2.setStored(False)
        t2.setTokenized(True)
        t2.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS)

        t3 = FieldType() #t3 is used to index titles
        t3.setIndexed(True)
        t3.setStored(True)
        t3.setTokenized(True)
        t3.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS)



        indextxt = open(self.filedir, 'r')


        while True:
            t = indextxt.readline()
            if (len(t) == 0):
                indextxt.close()
                return
            filename = t.strip()
#        for root, dirnames, filenames in os.walk(root):
#            for filename in filenames:
            print "updating", filename
            try:
                path = os.path.join(root, filename)
                file = open(path)
                title = file.readline()
                print title
                page_URL = file.readline()
                while True:
                    imgsrc = file.readline()
                    # readline() keeps the trailing newline and returns '' at end of file,
                    # so strip before comparing against the 'EOF' sentinel
                    if imgsrc.strip() == 'EOF' or imgsrc == '':
                        file.close()
                        break
                    contents = file.readline()
                    doc = Document()
                    doc.add(Field("name", filename, t1))
                    doc.add(Field("imgurl", imgsrc, t1))
                    doc.add(Field("url", page_URL, t1))
                    doc.add(Field("title",title, t3))
                    doc.add(Field("contents", contents, t2))
                    writer.addDocument(doc)
            except Exception, e:
                print "Failed in indexDocs:", e
Ejemplo n.º 40
0
    def indexDocs(self, root, writer):
        # author id
        t1 = FieldType()
        t1.setIndexed(True)
        t1.setStored(True)
        t1.setTokenized(False)
        # content (segmented text) label
        t2 = FieldType()
        t2.setIndexed(True)
        t2.setStored(True)
        t2.setTokenized(True)
        t2.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS)
        # likes imgurl text (original text) title
        t3 = FieldType()
        t3.setIndexed(False)
        t3.setStored(True)
        t3.setTokenized(False)
        # content (segmented text) title_tokened
        t4 = FieldType()
        t4.setIndexed(True)
        t4.setStored(False)
        t4.setTokenized(True)
        t4.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS)

        def addDoc(title,author,text,img,likes,content,title_tokened):
            try:
                doc = Document()
                doc.add(Field("author", author, t1))
                doc.add(Field("imgurl", img, t3))
                doc.add(Field("likes", likes, t3))
                doc.add(Field("text", text, t3))
                doc.add(Field("title", title, t3))
                doc.add(Field("title_tokened", title_tokened, t4))
                doc.add(Field("content", content, t4))
                doc.add(Field("label", '', t2))
                doc.add(Field("id", 'ch'+str(self.success), t1))
                writer.addDocument(doc)
                self.success += 1
            except Exception, e:
                print "Failed in indexDocs:", e
Ejemplo n.º 41
0
def index_wiki(wiki_xmlfile, index_directory_name):
    # Initialize index directory and analyzer.
    version = Version.LUCENE_CURRENT
    store = FSDirectory.open(File(index_directory_name))
    analyzer = StandardAnalyzer(version)
    # Create the index writer configuration.
    config = IndexWriterConfig(version, analyzer)
    config.setOpenMode(IndexWriterConfig.OpenMode.CREATE)
    writer = IndexWriter(store, config)
    # Set document content field type.
    content_fieldtype = FieldType()
    content_fieldtype.setIndexed(True)
    content_fieldtype.setStored(True)
    content_fieldtype.setTokenized(True)
    content_fieldtype.setIndexOptions(
        FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS)

    # Set document title field type.
    title_fieldtype = FieldType()
    title_fieldtype.setIndexed(True)
    title_fieldtype.setStored(True)
    title_fieldtype.setTokenized(True)
    title_fieldtype.setIndexOptions(
        FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS)

    # Set document url field type.
    url_fieldtype = FieldType()
    url_fieldtype.setIndexed(True)
    url_fieldtype.setStored(True)
    url_fieldtype.setTokenized(False)
    url_fieldtype.setIndexOptions(
        FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS)

    for xmldoc in wikicorpusxml((wiki_xmlfile)):
        content = xmldoc.partition('>')[2].partition('<')[0].strip()
        title = xmldoc.partition(' title="')[2].partition('"')[0].strip()
        url = xmldoc.partition(' url="')[2].partition('"')[0].strip()
        doc = Document()
        doc.add(Field("contents", content, content_fieldtype))
        doc.add(Field("title", title, title_fieldtype))
        doc.add(Field("url", url, url_fieldtype))
        writer.addDocument(doc)

    writer.commit()
    writer.close()
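
Both this and the following example pull each field out of the xmldoc string by partitioning on '>', ' title="' and ' url="', which assumes records shaped roughly like wikiextractor output (an inferred illustration, not a verbatim corpus sample):

<doc id="12" url="https://en.wikipedia.org/wiki?curid=12" title="Anarchism">
Anarchism is a political philosophy ...
</doc>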
Ejemplo n.º 42
0
def index_wiki(wiki_xmlfile, index_directory_name):
    lucene.initVM()
    # Initialize index directory and analyzer.
    version = Version.LUCENE_CURRENT
    store = FSDirectory.open(File(index_directory_name))
    analyzer = StandardAnalyzer(version)
    # Create the index writer configuration.
    config = IndexWriterConfig(version, analyzer)
    config.setOpenMode(IndexWriterConfig.OpenMode.CREATE)
    writer = IndexWriter(store, config)
    # Set document content field type.
    content_fieldtype = FieldType()
    content_fieldtype.setIndexed(True)
    content_fieldtype.setStored(True)
    content_fieldtype.setTokenized(True)
    content_fieldtype.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS)
    
    # Set document title field type.
    title_fieldtype = FieldType()
    title_fieldtype.setIndexed(True)
    title_fieldtype.setStored(True)
    title_fieldtype.setTokenized(True)
    title_fieldtype.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS)
    
    # Set document url field type.
    url_fieldtype = FieldType()
    url_fieldtype.setIndexed(True)
    url_fieldtype.setStored(True)
    url_fieldtype.setTokenized(False)
    url_fieldtype.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS)
    
    
    for xmldoc in wikicorpusxml((wiki_xmlfile)):
        content = xmldoc.partition('>')[2].partition('<')[0].strip()
        title = xmldoc.partition(' title="')[2].partition('"')[0].strip()
        url = xmldoc.partition(' url="')[2].partition('"')[0].strip()
        doc = Document()
        doc.add(Field("contents", content, content_fieldtype))
        doc.add(Field("title", title, title_fieldtype))
        doc.add(Field("url", url, url_fieldtype))
        writer.addDocument(doc)
     
    writer.commit()
    writer.close()
Ejemplo n.º 43
0
    def indexDocs(self, root, writer):

        t1 = FieldType()
        t1.setIndexed(False)
        t1.setStored(True)
        t1.setTokenized(False)

        t2 = FieldType()
        t2.setIndexed(True)
        t2.setStored(True)
        t2.setTokenized(True)
        t2.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS)

        for root, dirnames, filenames in os.walk(root):
            for filename in filenames:
                # if not filename.endswith('.txt'):
                #     continue
                print "adding", filename
                try:
                    path = os.path.join(root, filename)
                    file = open(path)
                    url = file.readline()
                    url = url.split('\n')
                    url = url[0]

                    contents = unicode(file.read(), 'UTF-8')

                    soup = BeautifulSoup(contents, "html.parser")
                    tmp = soup.find('title')
                    if tmp:
                        title = tmp.get_text().encode("UTF-8")
                    else:
                        title = "None title"

                    print title

                    seg_list = jieba.cut(contents)
                    string = ' '.join(seg_list)
                    # string=list((seg_list))
                    # use=''
                    # for i in string:
                    #     use+=i
                    use_index = string.encode("UTF-8")
                    # print use_index
                    file.close()

                    doc = Document()
                    doc.add(Field("name", filename, t1))
                    doc.add(Field("path", path, t1))
                    doc.add(Field("url", url, t1))
                    if len(use_index) > 0:
                        doc.add(Field("contents", use_index, t2))
                    else:
                        doc.add(Field("contents", "None content", t2))
                        print "warning: no content in %s" % filename

                    # if len(title)>0:
                    doc.add(Field("titles", title, t2))
                    # else:
                    #     print "warning: no title in %s" % filename

                    writer.addDocument(doc)
                except Exception, e:
                    print "Failed in indexDocs:", e
Ejemplo n.º 44
0
    def indexDocs(self, root, writer):

        t1 = FieldType()
        t1.setIndexed(False)
        t1.setStored(True)
        t1.setTokenized(False)

        t2 = FieldType()
        t2.setIndexed(True)
        t2.setStored(False)
        t2.setTokenized(True)
        t2.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS)

        t3 = FieldType()
        t3.setIndexed(True)
        t3.setStored(True)
        t3.setTokenized(True)  # tokenized by the preconfigured analyzer, here splitting on whitespace
        t3.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS)

        file = open("pages.txt", "r")
        indexTable = {}
        for line in file.readlines():
            page, filename = line.split('\t')
            indexTable[filename.strip('\n')] = page
        file.close()
        total = 0
        for root, dirnames, filenames in os.walk(root):
            for filename in filenames:
                raw_filename = filename[:-5]  #wipe off the .html suffix
                if (raw_filename.endswith('pdf')
                        or raw_filename.endswith('apk')):
                    continue
                print total
                print "adding", raw_filename
                try:
                    total += 1
                    path = os.path.join(root, filename)
                    file = open(path)
                    contents = file.read()
                    soup = bs4.BeautifulSoup(contents, features="html.parser")
                    title = soup.find("title")
                    contents = ''.join(soup.findAll(text=True))
                    contents = ' '.join(jieba.cut(contents))
                    file.close()
                    doc = Document()
                    if not title:
                        title = "Title Not Found"
                    else:
                        title = title.text
                    print title
                    doc.add(Field("title", title, t1))
                    doc.add(Field("name", raw_filename, t1))
                    url = indexTable[raw_filename]
                    doc.add(Field("url", url, t1))
                    doc.add(Field("path", path, t1))

                    sites = urlparse.urlparse(url).netloc
                    sitesList = [sites]
                    itemList = sites.split('.', 1)  # peel the site apart one domain level at a time
                    while (len(itemList) != 1):
                        sites = itemList[1]
                        sitesList.append(sites)
                        itemList = sites.split('.', 1)
                    doc.add(Field("site", ' '.join(sitesList), t3))
                    if len(contents) > 0:
                        doc.add(Field("contents", contents, t2))
                    else:
                        print "warning: no content in %s" % raw_filename
                    writer.addDocument(doc)

                except Exception, e:
                    print "Failed in indexDocs:", e
Ejemplo n.º 45
0
import lucene

from org.apache.lucene.analysis.miscellaneous import LimitTokenCountAnalyzer
from org.apache.lucene.analysis.standard import StandardAnalyzer
from org.apache.lucene.store import RAMDirectory
from org.apache.lucene.document import Document, Field, FieldType
from org.apache.lucene.util import BytesRef, BytesRefIterator, Version
from org.apache.lucene.index import \
    IndexWriterConfig, IndexWriter, DirectoryReader

if __name__ == '__main__':
    lucene.initVM(vmargs=['-Djava.awt.headless=true'])

directory = RAMDirectory()
iconfig = IndexWriterConfig(Version.LUCENE_CURRENT, LimitTokenCountAnalyzer(StandardAnalyzer(Version.LUCENE_CURRENT), 100))
iwriter = IndexWriter(directory, iconfig)

ft = FieldType()
ft.setIndexed(True)
ft.setStored(True)
ft.setTokenized(True)
ft.setStoreTermVectors(True)
ft.setStoreTermVectorOffsets(True)
ft.setStoreTermVectorPositions(True)

ts = ["this bernhard is the text to be index text",
      "this claudia is the text to be indexed"]
for t in ts:
    doc = Document()
    doc.add(Field("fieldname", t, ft))
    iwriter.addDocument(doc)

iwriter.commit()
iwriter.close()
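
The index above lives only in the RAMDirectory, so it has to be read back in the same process. A minimal search sketch against it (assuming the same Lucene 4.x PyLucene API as the rest of these examples; the query string is just an illustration):

from org.apache.lucene.queryparser.classic import QueryParser
from org.apache.lucene.search import IndexSearcher

reader = DirectoryReader.open(directory)
searcher = IndexSearcher(reader)

# query the same field, analyzed the same way as at index time
analyzer = StandardAnalyzer(Version.LUCENE_CURRENT)
query = QueryParser(Version.LUCENE_CURRENT, "fieldname", analyzer).parse("claudia")
hits = searcher.search(query, 10)

print "total hits:", hits.totalHits
for hit in hits.scoreDocs:
    print hit.score, searcher.doc(hit.doc).get("fieldname")

reader.close()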
Ejemplo n.º 46
0
def indexDocs(root, writer):
        """
        indexed: name title content
        stored: date name title summary
        :param root:
        :param writer:
        :return:
        """
        #index and store
        t1 = FieldType()
        t1.setIndexed(True)
        t1.setStored(True)
        t1.setTokenized(False)
        t1.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS)

        #only index, but not store
        t2 = FieldType()
        t2.setIndexed(True)
        t2.setStored(False)
        t2.setTokenized(True)
        t2.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS)

        #only store
        t3 = FieldType()
        t3.setIndexed(False)
        t3.setStored(True)
        t3.setTokenized(False)
        t3.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS)

        for root, dirnames, filenames in os.walk(root):
            print filenames
            for filename in filenames:
                if not filename.endswith('.md'):
                    continue
                print "adding", filename
                try:
                    path = os.path.join(root, filename)
                    file = open(path)
                    contents = unicode(file.read(), 'utf-8')
                    file.close()

                    date, name = get_date_name(filename)
                    title, content = get_post_title_content(contents)
                    summary = content[:200] if content else ''

                    print date, name, title

                    doc = Document()
                    doc.add(Field('date', date, t3))
                    doc.add(Field('name', name, t1))
                    doc.add(Field('title', title, t1))
                    doc.add(Field('content', content, t2))
                    doc.add(Field('summary', summary, t3))


                    # doc.add(Field("name", filename, t1))
                    # doc.add(Field("path", root, t1))
                    # if len(contents) > 0:
                    #     doc.add(Field("contents", contents, t2))
                    # else:
                    #     print "warning: no content in %s" % filename
                    writer.addDocument(doc)
                except Exception, e:
                    print "Failed in indexDocs:", e
Ejemplo n.º 47
0
    def _getIndex(self, even, odd):

        mergePolicy = LogDocMergePolicy()
        mergePolicy.setMergeFactor(1000)
        directory = RAMDirectory()
        self.dirs.append(directory)

        writer = self.getWriter(directory=directory,
                                analyzer=SimpleAnalyzer(Version.LUCENE_CURRENT),
                                maxBufferedDocs=2, mergePolicy=mergePolicy)

        if self.dvStringSorted:
            # Index sorted
            stringDVType = FieldInfo.DocValuesType.SORTED
        elif self.notSorted:
            # Index non-sorted
            stringDVType = FieldInfo.DocValuesType.BINARY
        else:
            # sorted anyway
            stringDVType = FieldInfo.DocValuesType.SORTED

        ft1 = FieldType()
        ft1.setStored(True)
        ft2 = FieldType()
        ft2.setIndexed(True)

        for i in xrange(len(self.data)):
            if (i % 2 == 0 and even) or (i % 2 == 1 and odd):
                doc = Document()
                doc.add(Field("tracer", self.data[i][0], ft1))
                doc.add(TextField("contents", self.data[i][1], Field.Store.NO))
                if self.data[i][2] is not None:
                    doc.add(StringField("int", self.data[i][2], Field.Store.NO))
                    if self.supportsDocValues:
                        doc.add(NumericDocValuesField("int_dv", Long.parseLong(self.data[i][2])))
                if self.data[i][3] is not None:
                    doc.add(StringField("float", self.data[i][3], Field.Store.NO))
                    if self.supportsDocValues:
                        doc.add(FloatDocValuesField("float_dv", Float.parseFloat(self.data[i][3])))

                if self.data[i][4] is not None:
                    doc.add(StringField("string", self.data[i][4], Field.Store.NO))
                    if self.supportsDocValues:
                        if stringDVType == FieldInfo.DocValuesType.SORTED:
                            doc.add(SortedDocValuesField("string_dv", BytesRef(self.data[i][4])))
                        elif stringDVType == FieldInfo.DocValuesType.BINARY:
                            doc.add(BinaryDocValuesField("string_dv", BytesRef(self.data[i][4])))
                        else:
                            raise ValueError("unknown type " + str(stringDVType))

                if self.data[i][5] is not None:
                    doc.add(StringField("custom", self.data[i][5], Field.Store.NO))
                if self.data[i][6] is not None:
                    doc.add(StringField("i18n", self.data[i][6], Field.Store.NO))
                if self.data[i][7] is not None:
                    doc.add(StringField("long", self.data[i][7], Field.Store.NO))
                if self.data[i][8] is not None:
                    doc.add(StringField("double", self.data[i][8], Field.Store.NO))
                    if self.supportsDocValues:
                        doc.add(NumericDocValuesField("double_dv", Double.doubleToRawLongBits(Double.parseDouble(self.data[i][8]))))
                if self.data[i][9] is not None:
                    doc.add(StringField("short", self.data[i][9], Field.Store.NO))
                if self.data[i][10] is not None:
                    doc.add(StringField("byte", self.data[i][10], Field.Store.NO))
                if self.data[i][11] is not None:
                    doc.add(StringField("parser", self.data[i][11], Field.Store.NO))

                for f in doc.getFields():
                    if f.fieldType().indexed() and not f.fieldType().omitNorms():
                        Field.cast_(f).setBoost(2.0)

                writer.addDocument(doc)

        reader = writer.getReader()
        writer.close()

        return self.getSearcher(reader=reader)
Ejemplo n.º 48
0
    def tweetIndexer(self, writer):

        t1 = FieldType()
        t1.setIndexed(True)
        t1.setStored(True)
        t1.setTokenized(False)
        t1.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS)
        
        t2 = FieldType()
        t2.setIndexed(True)
        t2.setStored(True)
        t2.setTokenized(True)
        t2.setStoreTermVectorOffsets(True)
        t2.setStoreTermVectorPayloads(True)
        t2.setStoreTermVectorPositions(True)
        t2.setStoreTermVectors(True)
        t2.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS)
        x = 0
        for i in range(0,500):
            if not os.path.isfile("json/tweets-" + str(i) + ".json"):
                break

            print "adding tweets-" + str(i) + ".json"
            tweets = open("json/tweets-" + str(i) + ".json", "r")

            for line in tweets.readlines():
                tweet = json.loads(line)
                if 'limit' in tweet:
                    continue
                try:
                    doc = Document()
                    doc.add(Field("file", "json/tweets-" + str(i) + ".json", t1))
                    sname = tweet['user']['screen_name']
                    tid = str(tweet['id'])
                    text = tweet['text']
                    uname = tweet['user']['name']
                    created = tweet['created_at']
                    tstamp = tweet['timestamp_ms']
                    place = ""
                    if tweet['place']:
                        place = tweet['place']['full_name'] + ", " + tweet['place']['country']
                    lat = ""
                    lng = ""
                    titles = ""
                    urls = ""
                    exist = "false"

                    if tweet['coordinates']:
                        lat = str(tweet['coordinates']['coordinates'][1])
                        lng = str(tweet['coordinates']['coordinates'][0])
                    else:
                        lat = str((tweet['place']['bounding_box']['coordinates'][0][0][1] + tweet['place']['bounding_box']['coordinates'][0][2][1])/2)
                        lng = str((tweet['place']['bounding_box']['coordinates'][0][0][0] + tweet['place']['bounding_box']['coordinates'][0][2][0])/2)
                    
                    if len(tweet['entities']['urls']) != 0:
                        exist = "true"
                        for index in range(len(tweet['entities']['urls'])):
                            title = tweet['entities']['urls'][index]['url_title']
                            if title == None:
                                titles += ",-"
                            else:
                                title = title.encode('ascii','ignore')
                                titles += "," + str(title)
                            urls += " " + str(tweet['entities']['urls'][index]['expanded_url'])


                    searchable = text + " " + urls + " " + uname + " " + sname + " " + place
                    doc.add(Field("lookup", searchable, t2))
                    doc.add(Field("text", text, t2))
                    doc.add(Field("user_name", uname, t2)) 
                    doc.add(Field("screen_name", sname, t2))                    
                    doc.add(Field("tweet_id", tid, t2))
                    doc.add(Field("created_at", created, t2))
                    doc.add(Field("geo_lat", lat, t2))
                    doc.add(Field("geo_lng", lng, t2))
                    doc.add(Field("url_exist", exist, t2))
                    doc.add(Field("url_url", urls, t2))
                    doc.add(Field("url_title", titles, t2))
                    doc.add(Field("timestamp", tstamp, t2))
                    writer.addDocument(doc)
                    x += 1
                except Exception, e:
                    pass
            tweets.close()
Ejemplo n.º 49
0
class Worker(Process):

    """This class represents the lucene worker object, which will run
    concurrently with other workers, and contains the actual lucene logic."""

    def __init__(self, config, queue, ret):
        Process.__init__(self)
        self.config = config
        self.connection = None
        self.channel = None
        self.queue = queue
        self.ret = ret

    def run(self):
        print "Booting lucene driver worker...."
        lucene.initVM()

        self.fieldType1 = FieldType()
        self.fieldType1.setIndexed(True)
        self.fieldType1.setStored(False)
        self.fieldType1.setTokenized(True)

        self.fieldType2 = FieldType()
        self.fieldType2.setIndexed(True)
        self.fieldType2.setStored(True)
        self.fieldType2.setTokenized(False)

        while(True):
            data = self.queue.get()
            da = data[1]
            response = None
            try:
                self.fil = File(da['data']['indexdir'])
                self.d = NIOFSDirectory(self.fil)
                self.analyzer = StandardAnalyzer(Version.LUCENE_CURRENT)
                self.conf = IndexWriterConfig(
                    Version.LUCENE_CURRENT,
                    self.analyzer)

                response = getattr(self, da['action'])(da['data'])
                self.d.close()
            except Exception as e:
                print e
            if response is None:
                response = {}

            self.ret[data[0]] = response

    def rebuildIndex(self, data):
        writer = IndexWriter(
            self.d, self.conf.setOpenMode(IndexWriterConfig.OpenMode.CREATE))

        for record in data['records']:
            doc = self.buildDocument(data['fields'], record)
            writer.addDocument(doc)

        writer.commit()
        writer.close()

    def buildDocument(self, fields, record):
        doc = Document()
        doc.add(
            Field("id",
                  record["_id"],
                  self.fieldType2))
        for field in fields:
            if isinstance(record[field], dict):
                self.dictToFields(doc, record[field])
            else:
                doc.add(
                    Field(field,
                          record[field],
                          self.fieldType1))

        return doc

    def dictToFields(self, doc, record):
        for key in record:
            if isinstance(record[key], dict):
                self.dictToFields(doc, record[key])
            else:
                doc.add(
                    Field(key,
                          record[key],
                          self.fieldType1))

    def index(self, data):
        writer = IndexWriter(
            self.d, self.conf)

        doc = self.buildDocument(data['fields'], data['record'])
        writer.addDocument(doc)

        writer.commit()
        writer.close()

    def updateindex(self, data):
        writer = IndexWriter(
            self.d, self.conf)

        doc = self.buildDocument(data['fields'], data['record'])
        writer.updateDocument(lucene.Term("_id", data['record']['_id']), doc)

        writer.optimize()
        writer.close()

    def removeindex(self, data):
        writer = IndexWriter(
            self.d, self.conf)

        writer.deleteDocuments(lucene.Term("_id", data['record']['_id']))

        writer.optimize()
        writer.close()

    def query(self, data):
        if self.fil.exists():
            searcher = IndexSearcher(DirectoryReader.open(self.d))
            query = QueryParser(
                Version.LUCENE_CURRENT,
                "id",
                self.analyzer).parse(
                data['query'])
            hits = searcher.search(query, 100000)

            results = {}

            results['totalHits'] = hits.totalHits
            results['hits'] = {}

            for hit in hits.scoreDocs:
                record = {}
                doc = searcher.doc(hit.doc)
                fields = doc.getFields()
                record['score'] = hit.score
                for field in fields:
                    if field.name() != "id":
                        record[field.name()] = field.stringValue()
                results['hits'][doc.get('id')] = record

            searcher.getIndexReader().close()
            return results
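
Since run() blocks on queue.get() and publishes each response into ret under the message key, a minimal driver sketch looks like this (the index path and query string are made-up placeholders):

import time
from multiprocessing import Manager, Queue

manager = Manager()
queue = Queue()
ret = manager.dict()  # shared mapping the worker writes responses into

worker = Worker(config={}, queue=queue, ret=ret)
worker.start()

# each message is (key, {'action': ..., 'data': {...}});
# 'indexdir' tells the worker which index to open for this request
queue.put(('req-1', {'action': 'query',
                     'data': {'indexdir': '/tmp/example-index',
                              'query': 'id:doc1'}}))

while 'req-1' not in ret:  # poll until the worker stores our response
    time.sleep(0.1)
print ret['req-1']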
Ejemplo n.º 50
0
    def indexDocs(self, root, writer):

        for root, dirnames, filenames in os.walk(root):
            for filename in filenames:
                print "adding", filename
                doc_parser = HTMLDocumentParser()
                try:
                    path = os.path.join(root, filename)
                    file = open(path)
                    contents = unicode(file.read(), 'iso-8859-1')
                    doc_parser.feed(contents)
                    contents = doc_parser.contents
                    html_doc = HTMLDocument(contents)

                    flag = False
                    if flag:
                        print '=============='
                        print 'Title: ' + html_doc.title
                        print 'Description: ' + html_doc.description
                        print 'Month: ' + html_doc.month
                        print 'Year: ' + html_doc.year
                        print 'Authors: ' + str(html_doc.authors)
                        print 'Keywords: ' + str(html_doc.keywords)
                        print 'Timestamp: ' + str(html_doc.timestamp)
                        print ' '

                    file.close()

                    doc = Document()

                    field_filename = FieldType()
                    field_filename.setIndexed(True)
                    field_filename.setStored(True)
                    field_filename.setTokenized(False)
                    field_filename.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS)        
                    doc.add(Field("filename", filename.replace('.html', ''), field_filename))

                    field_path = FieldType()
                    field_path.setIndexed(True)
                    field_path.setStored(True)
                    field_path.setTokenized(True)
                    field_path.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS)
                    doc.add(Field("path", root, field_path))
                    
                    field_title = FieldType()
                    field_title.setIndexed(True)
                    field_title.setStored(True)
                    field_title.setTokenized(True)
                    field_title.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS)        
                    doc.add(Field("title", html_doc.title, field_title))

                    field_description = FieldType()
                    # index the description only when the page actually has one
                    field_description.setIndexed(html_doc.has_description())
                    field_description.setStored(True)
                    field_description.setTokenized(True)
                    field_description.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS)        
                    doc.add(Field("description", html_doc.description, field_description))

                    field_month = FieldType()
                    field_month.setIndexed(True)
                    field_month.setStored(True)
                    field_month.setTokenized(False)
                    field_month.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS)        
                    doc.add(Field("month", html_doc.month, field_month))

                    field_year = FieldType()
                    field_year.setIndexed(True)
                    field_year.setStored(True)
                    field_year.setTokenized(False)
                    field_year.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS)        
                    doc.add(Field("year", html_doc.year, field_year))

                    if html_doc.has_authors():
                        field_author = FieldType()
                        field_author.setIndexed(True)
                        field_author.setStored(True)
                        field_author.setTokenized(True)
                        field_author.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS) 
                        for author in html_doc.authors:
                            doc.add(Field("author", author, field_author))

                    if html_doc.has_keywords():
                        field_keyword = FieldType()
                        field_keyword.setIndexed(True)
                        field_keyword.setStored(True)
                        field_keyword.setTokenized(True)
                        field_keyword.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS) 
                        for keyword in html_doc.keywords:
                            doc.add(Field("keyword", keyword, field_keyword))

                    field_timestamp = FieldType()
                    field_timestamp.setIndexed(False)
                    field_timestamp.setStored(True)
                    field_timestamp.setTokenized(False)
                    field_timestamp.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS)        
                    doc.add(Field("timestamp", html_doc.timestamp, field_timestamp))

                    if len(contents) > 0:
                        field_source = FieldType()
                        field_source.setIndexed(True)
                        field_source.setStored(True)
                        field_source.setTokenized(True)
                        field_source.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) 
                        doc.add(Field("contents", contents, field_source))
                    else:
                        print "warning: no content in %s" % filename
                    writer.addDocument(doc)
                except Exception, e:
                    print "Failed in indexDocs:", e
Ejemplo n.º 51
0
def indexCranFull(path, writer):
    """
    This method reads in the cran.1400 file and creates an index out of it.

    These fields are used when storing documents:
        title: title of the document (in a line starting with .T)
        author: author of the document (in a line starting with .A)
        source: where this article has been published, not yet used (in a line starting with .B)
        content: body text of the article (in a line starting with .W)
    """
    # Title field type
    tft = FieldType()
    tft.setIndexed(True)
    tft.setStored(True)
    tft.setTokenized(TokenizeFields)
    tft.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS) #only index the document and frequency data


    # Author field type
    aft = FieldType()
    aft.setIndexed(True)
    aft.setStored(True)
    aft.setTokenized(TokenizeFields)
    aft.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) #index the document, term frequency and position data


    # Content field type
    cft = FieldType()
    cft.setIndexed(True)
    cft.setStored(True)
    cft.setTokenized(True)
    cft.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS)

    txt = open(path)
    """
    cran documents are listed in this order:
    .I # -> ignore
    .T -> starting on the next line, multi-line title
    .A -> on the next line, multiple authored separated by 'and'
    .B -> on the next line, ignore
    .W -> starting on the next line, multi-line body (content)
    """
    docid = 0
    debug = False
    while True: # in each iteration, read all the lines corresponding to a document
        line = txt.readline()

        if line == '':
            break

        if docid == 1400:
            debug = True

        if line.startswith('.I'):
            docid = int(line.split(' ')[1].strip())
            continue

        if line.startswith('.T'):
            title = ''
            while True:
                line = txt.readline()
                if line.startswith('.A'):
                    break
                title = ' '.join([title, line]).strip()

            line = txt.readline()
            # authors = ' '.join(line.split('and'))
            authors = line.strip()

            #.B, its corresponding line and .W
            txt.readline()
            txt.readline()
            txt.readline()
            body = ''
            while True:
                line = txt.readline()
                if line.startswith('.I'):
                    docid = int(line.split(' ')[1].strip())
                    break
                if line == '':
                    break
                body = ' '.join([body, line]).strip()

            doc = Document()
            doc.add(Field(title_field, title, tft))
            doc.add(Field(author_field, authors, aft))
            doc.add(Field(content_field, body, cft))
            doc.add(IntField(docid_field, docid, Field.Store.YES))
            writer.addDocument(doc)
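
For reference, a record in the cran.1400 layout this parser walks through looks like the following (illustrative and lightly abridged):

.I 1
.T
experimental investigation of the aerodynamics of a
wing in a slipstream .
.A
brenckman,m.
.B
j. ae. scs. 25, 1958, 324.
.W
experimental investigation of the aerodynamics of a
wing in a slipstream ...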
Ejemplo n.º 52
0
Archivo: tirza.py Proyecto: komax/tirza
    analyzer = StandardAnalyzer(Version.LUCENE_43)
    config = IndexWriterConfig(Version.LUCENE_43, analyzer)
    writer = IndexWriter(directory, config)
    return writer

def open_searcher(writer):
    from org.apache.lucene.search import IndexSearcher
    reader = writer.getReader()
    searcher = IndexSearcher(reader)
    return reader, searcher

from org.apache.lucene.document import Document, Field, FieldType, TextField, StringField
from org.apache.lucene.util import BytesRef, BytesRefIterator
from org.apache.lucene.index import Term
vectorFieldType = FieldType(TextField.TYPE_NOT_STORED)
vectorFieldType.setIndexed(True)
vectorFieldType.setTokenized(True)
vectorFieldType.setStoreTermVectors(True)
vectorFieldType.setStoreTermVectorPositions(False)

writer = open_writer('data/index')

def addToIndex(lxmlNode):
    uri = xpathFirst(lxmlNode, '//oa:hasTarget/@rdf:resource')
    print uri
    seen = set()
    doc = Document()
    for fieldName in FIELD_NAMES:
        seen.clear()
        for subpath in [
            '', '/*/rdfs:label', '/*/skos:prefLabel', '/*/skos:altLabel',
Ejemplo n.º 53
0
    def indexTable(self, writer):

        #connection 
        con = None

        #define the index of all the fields
        #---------step 2:connect to mysql----------
        con = mdb.connect('localhost','root','testgce','douban_movie_v3')

        # t_num = FieldType.NumericType -- this does not work!
        t_num = FieldType()
        t_num.setStored(False)

        t1 = FieldType()
        t1.setIndexed(True)
        t1.setStored(True)
        t1.setTokenized(False)
        t1.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS)
        
        t2 = FieldType()
        t2.setIndexed(True)
        t2.setStored(False)
        t2.setTokenized(True)
        t2.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS)

        t3 = FieldType()
        t3.setIndexed(True)
        t3.setStored(True)
        t3.setTokenized(True)
        t3.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS)

        maxDict = utils.maxDict
        # value range used for document boosting
        base = DOC_BOOST_RANGE[0]
        upper = DOC_BOOST_RANGE[1]

        with con:
            # Careful with codecs
            con.set_character_set('utf8')

            cur = con.cursor()
            # Again, the codecs
            cur.execute('SET NAMES utf8;')
            cur.execute('SET CHARACTER SET utf8;')
            cur.execute('SET character_set_connection=utf8;')
            
            #------step 3: choose the right table------
            cur.execute("SELECT * FROM movie_items")

            numrows = int(cur.rowcount)
            print 'numrows:',numrows
            for i in range(numrows):
                print
                row = cur.fetchone()

                #------step 4:Index your field------
                summary = row[SUMMARY]  
                subject_id = row[SUBJECT_ID]


                print 'id'+subject_id
                year = utils.formatYear(row[YEAR])
                wtfFile = open('wtf.txt','a')
                try:
                    date = DateTools.stringToDate(year.replace('-',' '))
                    dateStr = DateTools.dateToString(date,DateTools.Resolution.DAY)
                except:
                    # log years that fail to parse; keep dateStr defined so indexing can go on
                    wtfFile.write(year+'\n')
                    dateStr = ''

                        

                doc = Document()

                #boosting
                boostProb = utils.calcBoostProb(row,maxDict,dateStr)
                boost = base + boostProb*(upper-base)

                doc.add(FloatField("boost",boost,Field.Store.YES))
                doc.add(StringField("year",dateStr,Field.Store.YES))
                print 'dateStr:'+dateStr
                #A text field is a sequence of terms that has been tokenized while a string field is a single term (although it can also be multivalued.)

                do_count = row[DO_COUNT] if row[DO_COUNT] != None else 0
                wish_count = row[WISH_COUNT] if row[WISH_COUNT] != None else 0

                #fields which should not be analyzed
                doc.add(FloatField("rating_average",float(row[RATING_AVERAGE]),Field.Store.YES))
                doc.add(FloatField("rating_stars", float(row[RATING_STARS]), Field.Store.YES))
                doc.add(IntField("reviews_count", int(row[REVIEWS_COUNT]), Field.Store.YES))
                #doc.add(FloatField("year", float(row[YEAR]), Field.Store.YES).setBoost(boost))
                doc.add(IntField("collect_count", int(row[COLLECT_COUNT]), Field.Store.YES))
                doc.add(IntField("do_count", int(do_count), Field.Store.YES))
                doc.add(IntField("wish_count", int(wish_count), Field.Store.YES))
                doc.add(IntField("subject_id", int(row[SUBJECT_ID]), Field.Store.YES))
                doc.add(IntField("comments_count", int(row[COMMENTS_COUNT]), Field.Store.YES))
                doc.add(IntField("ratings_count", int(row[RATINGS_COUNT]), Field.Store.YES))
                doc.add(StringField("image_small", row[IMAGE_SMALL], Field.Store.YES))

                #fields which should be analyzed with WhitespaceAnalyzer
                # attention! don't chain the calls like:
                # doc.add(Field("genres",    row[GENRES].replace(delim,' '),    t3).setBoost(boost))
                # or you'll get a null pointer error, because setBoost returns nothing
                f = Field("countries", row[COUNTRIES].replace(delim,' '), t3)
                f.setBoost(boost)
                doc.add(f)

                #process casts
                raw_casts = row[CASTS].replace(delim,' ')
                f = Field("raw_casts", raw_casts , t1)
                f.setBoost(boost)
                doc.add(f)

                # replace the '·' separator in transliterated Western names with a space
                raw_casts = raw_casts.replace('·',' ')

                if len(raw_casts.split(' '))<CASTS_LEN:
                    # pad to CASTS_LEN; the average name length is about 4
                    casts = raw_casts + ' ¥¥¥¥'*(CASTS_LEN-len(raw_casts.split(' ')))
                else:
                    casts = raw_casts
                f = Field("casts", casts , t3)
                f.setBoost(boost)
                doc.add(f)

                #process directors
                raw_directors = row[DIRECTORS].replace(delim,' ')
                f = Field("raw_directors",raw_directors, t1)
                f.setBoost(boost)
                doc.add(f)

                # replace the '·' separator in transliterated Western names with a space
                raw_directors = raw_directors.replace('·',' ')

                if len(raw_directors.split(' '))<DIRECTORS_LEN:
                    # pad to DIRECTORS_LEN; the average name length is about 4
                    directors = raw_directors + ' ¥¥¥¥'*(DIRECTORS_LEN-len(raw_directors.split(' ')))
                else:
                    directors = raw_directors
                f = Field("directors", directors, t3)
                f.setBoost(boost)
                doc.add(f)

                Field("genres",    row[GENRES].replace(delim,' '),    t3)
                f.setBoost(boost)
                doc.add(f)

                Field("subtype",   row[SUBTYPE].replace(delim,' '),   t3)
                f.setBoost(boost)
                doc.add(f)

                # this is wrong because an IndexableField has no setBoost method
                # fieldList = doc.getFields()  # not a python 'list' but a Java 'List', which is unindexable
                # for eachField in fieldList:
                #     eachField.setBoost(boost)


                # store the raw user_tags string as-is; it is needed later for reRank:
                doc.add(StringField("raw_user_tags",row[USER_TAGS],Field.Store.YES))
                doc.add(StringField("raw_others_like",row[OTHERS_LIKE],Field.Store.YES))
                

                user_tags_str = ''
                others_like_str = ''
                tags_len = 0
                

                if row[USER_TAGS]!='':
                    user_tags_list = row[USER_TAGS].split(delim) 
                    for tag_pair in user_tags_list:
                        if tag_pair!='':  # the string ends with ¥, so split leaves a trailing empty element
                            #print 'tag_pair'+tag_pair+'hhe'
                            tag_name = tag_pair.split(delim_uo)[0]+' ' # don't forget this space!
                            tag_num = tag_pair.split(delim_uo)[1]
                            tag_num_processed = int(int(tag_num)/TAG_SPAN)+1 # at least 1
                            user_tags_str = user_tags_str +' '+ tag_name * tag_num_processed
                            tags_len = tags_len + tag_num_processed # running total of tag tokens


                if tags_len<TAGS_AVER_LEN:
                    # pad the tags; 3 looks like the average length, hence ' ¥¥¥'
                    user_tags_str = user_tags_str +' ¥¥¥'*(TAGS_AVER_LEN - tags_len)


                if row[OTHERS_LIKE]!='':
                    for like_pair in row[OTHERS_LIKE].split(delim):
                        if like_pair!='':
                            others_like_str = others_like_str +' '+like_pair.split(delim_uo)[1]


                #start process adjs
                if row[ADJS] != None:
                    raw_adjs = row[ADJS][:-1]

                    adjs_str = ''
                    adjs_len = 0
                    if row[ADJS] != '' and row[ADJS] != '\n':
                        #'重要=4.0,特殊=4.0'
                        adjs_str = row[ADJS]
                        adjs_list = adjs_str.split(',')
                        for adj_pair in adjs_list:
                            #print 'adj_pair:'+adj_pair+'hhe'
                            adj_name = adj_pair.split('=')[0]
                            adj_num = adj_pair.split('=')[1]

                            # strip the trailing newline and convert to int
                            if adj_num[-1] == '\n':
                                adj_num = adj_num[0:-1]
                            adj_num = int(float(adj_num))

                            add_adj=''
                            # # synonyms
                            # adj_name_bro = searchDictValue(adjMap,adj_name)
                            # if adj_name_bro == -1: # -1 means nothing found, i.e. no synonym, so add nothing
                            #     add_adj = ''
                            # else:
                            #     add_adj = (adj_name_bro+' ')*adj_num
                            #     raw_adjs = raw_adjs + ',' + adj_name_bro+'='+str(adj_num)
                                
                            adjs_str = adjs_str + ' ' + (adj_name+' ') * adj_num +add_adj
                            adjs_len = adjs_len + adj_num # running total of adjective tokens

                    #print raw_adjs
                    doc.add(StringField("raw_adjs",raw_adjs,Field.Store.YES))

                    if adjs_len<ADJS_AVER_LEN:
                        # pad adjs_str; 2 looks like the average length, hence ' ¥¥'
                        adjs_str = adjs_str +' ¥¥'*(ADJS_AVER_LEN - adjs_len)

                    f = Field("adjs", adjs_str, t3)
                    f.setBoost(boost)
                    doc.add(f)

                f = Field("user_tags", user_tags_str, t3)
                f.setBoost(boost)
                doc.add(f)

                f = Field("others_like", others_like_str, t3)
                f.setBoost(boost)
                doc.add(f)



                #fields which should be analyzed with good analyzer
                f = Field("title", row[TITLE], t3)                
                f.setBoost(boost)
                doc.add(f)

                f = Field("original_title", row[ORIGINAL_TITLE], t3)
                f.setBoost(boost)
                doc.add(f)

                f = Field("summary_segmentation", row[SUMMARY_SEGMENTATION], t3)
                f.setBoost(boost)
                doc.add(f)

                f = Field("aka", row[AKA], t2)
                f.setBoost(boost)
                doc.add(f)

                if len(summary) > 0:
                    print subject_id +'--->'+':\n    '+ row[TITLE]
                    try:
                        summary_unicoded = unicode(summary, 'utf-8') #test the encoding 
                    except Exception,e:
                        print "Decode Failed: ", e
                    f = Field('summary', summary, t2)
                    f.setBoost(boost)
                    doc.add(f)
                else:
                    print "warning:\n" + subject_id +'---> No content!'
                print 'boosting:' + str(boost)

                #for debug
                if boost>upper:
                    print boostProb
                    print maxDict
                    
                    exit(0)

                writer.addDocument(doc)
Ejemplo n.º 54
0
	def __init__(self):

		self.mDocumentDirectory = settings.ADMINS_ENGINE.mDocumentDirectory
		self.mIndexDirectory = settings.ADMINS_ENGINE.mIndexDirectory
		self.mAnalyzers = settings.ADMINS_ENGINE.getIndexingAnalyzers()


		############################# Writer Configuration #####################################
		map = HashMap()
		map.put('name', self.mAnalyzers['name'])
		map.put('parent', self.mAnalyzers['parent'])
		map.put('content', self.mAnalyzers['default'])
		map.put('id', self.mAnalyzers['id'])		

		analyzerWrapper = PerFieldAnalyzerWrapper(self.mAnalyzers['default'], map)

		self.mWriterConfig = IndexWriterConfig(Version.LUCENE_CURRENT, analyzerWrapper)
		self.mWriterConfig.setOpenMode(settings.ADMINS_ENGINE.mOpenMode)

		if settings.ADMINS_ENGINE.mSimilarity != None:
			self.mWriterConfig.setSimilarity(settings.ADMINS_ENGINE.mSimilarity)
		########################################################################################


		directory = SimpleFSDirectory(File(self.mIndexDirectory))
		self.mIndexWriter = IndexWriter(directory, self.mWriterConfig)


		############################# FieldType Preparation #####################
		nameField = FieldType()
		nameField.setIndexed(True)
		nameField.setStored(True)
		nameField.setTokenized(True)
		nameField.setIndexOptions(FieldInfo.IndexOptions.DOCS_ONLY)

		parentField = FieldType()
		parentField.setIndexed(True)
		parentField.setStored(True)
		parentField.setTokenized(True)
		parentField.setIndexOptions(FieldInfo.IndexOptions.DOCS_ONLY)

		contentField = FieldType()
		contentField.setIndexed(True)
		contentField.setStored(True)
		contentField.setTokenized(True)
		contentField.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS)

		idField = FieldType()
		idField.setIndexed(True)
		idField.setStored(True)
		idField.setTokenized(False)
		idField.setIndexOptions(FieldInfo.IndexOptions.DOCS_ONLY)


		self.mFieldTypes = {
			'name' 		: nameField,
			'parent'	: parentField,
			'content'	: contentField,
			'id'		: idField
		}
		#######################################################################

		self.mLog = ""
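
With the per-field analyzers and field types wired up, adding a document through this engine could look like the following hypothetical helper (the method name is an illustration; the field names match the mFieldTypes keys above):

	def addEntry(self, id, name, parent, content):
		# one Field per configured type; each field is analyzed by the
		# matching analyzer in the PerFieldAnalyzerWrapper above
		doc = Document()
		doc.add(Field('id', id, self.mFieldTypes['id']))
		doc.add(Field('name', name, self.mFieldTypes['name']))
		doc.add(Field('parent', parent, self.mFieldTypes['parent']))
		doc.add(Field('content', content, self.mFieldTypes['content']))
		self.mIndexWriter.addDocument(doc)
		self.mIndexWriter.commit()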
Ejemplo n.º 55
0
    def indexDocs(self, root, writer):

        t1 = FieldType()
        t1.setIndexed(False)
        t1.setStored(True)
        t1.setTokenized(False)

        t2 = FieldType()
        t2.setIndexed(True)
        t2.setStored(True)
        t2.setTokenized(True)
        t2.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS)

        count = 0
        filedic = {}
        urlfile = open("sportsinformation.txt", "r")
        for line in urlfile.readlines():
            urlname, pagename, newsdate = line.split('*my_sep*')
            newsdate = newsdate.strip('\n')
            if len(newsdate) == 0:
                continue
            filedic[pagename] = [urlname, newsdate]
            '''pageANDurl = line.split('\t')
            urlname = pageANDurl[0]
            webpage = pageANDurl[1].strip('\n')
            filedic[webpage] = urlname'''
        urlfile.close()

        for root, dirnames, filenames in os.walk(root):
            for filename in filenames:

                if (filename.endswith('apk') or filename.endswith('pdf') \
                        or filename.endswith('exe') or filename.endswith('rar') \
                        or filename.endswith('zip')):
                    print filename, " SKIP THIS FILE!"
                    continue
                count += 1
                print "adding", filename, " ", count
                try:
                    path = os.path.join(root, filename)
                    file = open(path)
                    contents = unicode(file.read(), 'utf8', 'ignore')
                    soup = BeautifulSoup(contents, "html.parser")
                    title = soup.find("title")
                    if title:
                        title = title.text
                    else:
                        title = "This page has no title"
                    contents = ''.join(soup.findAll(text=True))
                    contents = ' '.join(jieba.cut(contents))
                    file.close()
                    url = filedic[filename][0]
                    newsdate = str(filedic[filename][1])
                    print type(newsdate)
                    print type(url)
                    doc = Document()
                    doc.add(Field("title", title, t1))
                    doc.add(Field("url", url, t1))
                    doc.add(Field("date", newsdate, t1))

                    sites = []
                    site = urlparse.urlparse(url).netloc
                    siteparts = site.split(".")
                    length = len(siteparts)
                    while (length > 0):
                        length -= 1
                        site = '.'.join(siteparts[length:])
                        sites.append(site)
                    site = ' '.join(sites)
                    doc.add(Field("site", site, t2))

                    if len(contents) > 0:
                        print "yes"
                        doc.add(Field("contents", contents, t2))
                    else:
                        print "warning: no content in %s" % filename
                    writer.addDocument(doc)
                except Exception, e:
                    print "Failed in indexDocs:", e
Ejemplo n.º 56
0
    def indexDocs(self, writer):

        t1 = FieldType()
        t1.setIndexed(True)
        t1.setStored(True)
        t1.setTokenized(False)
        t1.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS)
        
        t2 = FieldType()
        t2.setIndexed(True)
        t2.setStored(False)
        t2.setTokenized(True)
        t2.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS)



        for folder in os.listdir(rawDir):
            for fileName in os.listdir(rawDir+'/'+folder):
                if not fileName.endswith('.json'):
                    continue


                print fileName
                
                subject_id = fileName.split('_')[0]
                
                # print rawDict
                print 'id:'+subject_id
                print 'folder:'+str(folder)

                rawPath = rawDir+'/' +folder +'/'+subject_id+'_res.json'
                adjPath = adjDir+'/' +folder +'/'+subject_id+'_adj.json'
                #tfidfPath = tf_idfDir + '/' + subject_id+'_tfidf.json'

                print adjPath

                rawFile = open(rawPath,'r')
                raw = rawFile.read()
                rawFile.close()
                if raw =='':
                    # log records with no raw content and skip them;
                    # rawDict below is only defined for non-empty raw
                    with open(baseDir+'/'+'err_no_raw_content.txt','a') as err:
                        err.write(subject_id+'\n')
                    continue
                # if subject_id =='6018943':
                #     rawFile.seek(0)
                adjFile = open(adjPath,'r')
                adj = adjFile.read()

                adjFile.close()

                raw = getRidOfBOM(raw)
                adj = getRidOfBOM(adj)

                rawDict = json.loads(raw)
                adjDict = json.loads(adj)

                rawAll = rawDict['summary'] +' '+ rawDict['user_tags'] + ' '+rawDict['comments']
                summary_adjs = adjDict['summary_adjs']
                comments_adjs = adjDict['comments_adjs']
                title = adjDict['title']
                rating_average = adjDict['rating_average']
                comments_count = adjDict['comments_count']

                doc = Document()
                doc.add(Field("folder",folder,t1))
                doc.add(Field("title", title, t1))
                doc.add(Field("subject_id", subject_id, t1))
                doc.add(IntField("comments_count", comments_count, Field.Store.YES))
                doc.add(FloatField("rating_average", rating_average, Field.Store.YES))


                if len(summary_adjs)>0:
                    print summary_adjs  # debug output
                doc.add(Field("summary_adjs", summary_adjs, t2))

                #if len(comments_adjs)>0:
                doc.add(Field("comments_adjs", comments_adjs, t2))

                writer.addDocument(doc)
Ejemplo n.º 57
0
	def reindex(self):
		''' Re-indexes the entire database into the index directory '''
		start = time.time()

		# get all posts
		posts = self._tuples_to_dict(self._fetch_all_questions(), self._posts_fields)
		if not posts:
			raise Exception("FATAL Error: Could not fetch posts from Database")

		# open indexer
		# lucene.initVM(vmargs=['-Djava.awt.headless=true'])
		# print 'lucene', lucene.VERSION

		store = SimpleFSDirectory(File(self.index_dir))
		analyzer = StandardAnalyzer(Version.LUCENE_CURRENT)
		config = IndexWriterConfig(Version.LUCENE_CURRENT, analyzer)
		config.setOpenMode(IndexWriterConfig.OpenMode.CREATE_OR_APPEND)
		writer = IndexWriter(store, config)

		indexedField = FieldType()
		indexedField.setIndexed(True)
		indexedField.setStored(True)
		indexedField.setTokenized(True)
		indexedField.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS)

		storedField = FieldType()
		storedField.setIndexed(False)
		storedField.setStored(True)
		storedField.setTokenized(False)
		storedField.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS)

		fieldTypes = {
						'type'		: storedField,
						'id'		: storedField,
						'title'		: indexedField,
						'question'	: indexedField,
						'answer'	: indexedField,
						# 'comment'	: indexedField,
						'tag'		: indexedField,
						'extra'		: indexedField,
		}

		# get their comments
		num_docs = 0
		for post in posts:
			if self.status_mode: print "\r {0:.2f}% complete".format((num_docs / 142627.0) * 100),
			if self.debug : print "\n","*"*20,"\nIndexing post: ", post['id'], "from ", post['extra']
			if self.debug and self.verbose_values: print post
			answers = self._tuples_to_dict(self._fetch_all_answers(post['id'], post['extra']), self._answer_fields)

			# index each answer as its own document
			for answer in answers:
				num_docs += 1
				if self.debug: print "\n","+"*10, "\nMaking new Document"
				doc = Document()
				if self.debug: print "Adding doc type"
				doc.add(Field("type", self.doctype, fieldTypes['type']))
				
				# make fields
				if self.debug: print "Adding post fields"
				for i in xrange(len(self._posts_fields)):
					f = Field(self._posts_fields[i], self._cleanup_tag(post[self._posts_fields[i]]), fieldTypes[self._posts_fields[i]])
					f.setBoost(self._fields_boost[self._posts_fields[i]])
					doc.add(f)

				if self.status_mode: print "\t Indexing answer: ", answer['answer_id']
				if self.debug and self.verbose_values: print answer
				# answered_doc = copy.deepcopy(doc)
				# make comment field
				f = Field("answer", self._cleanup_tag(answer['answer']), fieldTypes['answer'])
				f.setBoost(self._fields_boost['answer'])
				doc.add(f)
				# calculate paths
				# commented_doc = copy.deepcopy(answered_doc)
				# comments = self._comments_to_comment_string(self._tuples_to_dict(self._fetch_all_comments(answer['id']), self._comment_fields))

				# if self.debug: print "\t\tAdding comments: ", comments
				# commented_doc.add(Field("comment", self._cleanup_tag(comments), fieldTypes['comment']))

				# write index
				if self.debug: print "\tAdding document {doc_id} to index".format(doc_id=post['id'])
				writer.addDocument(doc)

				# del answered_doc
				# del commented_doc

			if self.debug: print "Committing document to index"
			writer.commit()

		# close index
		if self.status_mode: print "Closing index writer"
		writer.close()
		end = time.time() - start

		if self.status_mode: print "\n","-"*20, \
			"\nTotal time spent in indexing: ", end, "seconds" \
			"\nIndexed {num_docs} documents".format(num_docs=num_docs)
Ejemplo n.º 58
0
                            doc.add(Field("former_scents", former_scents, t3))
                            mid_scents = ' '.join(scents_list[1])
                            doc.add(Field("mid_scents", mid_scents, t3))
                            last_scents = ' '.join(scents_list[2])
                            doc.add(Field("last_scents", last_scents, t3))
                    except Exception, e:
                        print "Failed in indexDocs:", e
                        print line
                        print cnt
                    cnt += 1
                    writer.addDocument(doc)
            except Exception, e:
                print "Failed in indexDocs:", e

        t1 = FieldType()
        t1.setIndexed(True)
        t1.setStored(True)
        t1.setTokenized(False)
        t1.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS)

        t3 = FieldType()
        t3.setIndexed(True)
        t3.setStored(True)
        t3.setTokenized(True)
        t3.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS)

        t2 = FieldType()
        t2.setIndexed(True)
        t2.setStored(False)
        t2.setTokenized(False)
        t2.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS)
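
Every snippet in this collection configures FieldType one setter at a time; the factory below is a refactoring sketch (not taken from any of the original examples) that keeps the four switches together.

import lucene
lucene.initVM()

from org.apache.lucene.document import FieldType
from org.apache.lucene.index import FieldInfo

def makeFieldType(indexed, stored, tokenized,
                  options=FieldInfo.IndexOptions.DOCS_AND_FREQS):
    # bundle the flags the examples keep setting line by line
    t = FieldType()
    t.setIndexed(indexed)
    t.setStored(stored)
    t.setTokenized(tokenized)
    t.setIndexOptions(options)
    return t

# the two configurations that recur throughout the examples:
t1 = makeFieldType(True, True, False)  # stored, untokenized "keyword" field
t2 = makeFieldType(True, False, True,
                   FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS)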
Ejemplo n.º 59
0
    def indexTable(self, writer):

        #connection 
        con = None

        #define the index of all the fields
        #---------step 2----------
        con = mdb.connect('localhost','root','testgce','moviedata')

        # FieldType.NumericType is an enum, not a field type; build a FieldType by hand instead
        t_num = FieldType()
        t_num.setStored(False)

        t1 = FieldType()
        t1.setIndexed(True)
        t1.setStored(True)
        t1.setTokenized(False)
        t1.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS)
        
        t2 = FieldType()
        t2.setIndexed(True)
        t2.setStored(False)
        t2.setTokenized(True)
        t2.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS)

        t3 = FieldType()
        t3.setIndexed(True)
        t3.setStored(True)
        t3.setTokenized(True)
        t3.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS)

        with con:
            # Careful with codecs
            con.set_character_set('utf8')

            cur = con.cursor()
            # Again, the codecs
            cur.execute('SET NAMES utf8;')
            cur.execute('SET CHARACTER SET utf8;')
            cur.execute('SET character_set_connection=utf8;')
            
            #------step 3------
            cur.execute("SELECT * FROM movie_items")

            numrows = int(cur.rowcount)
            print 'numrows:',numrows
            for i in range(numrows):
                row = cur.fetchone()

                #------step 4------
                summary = row[SUMMARY]  
                subject_id = row[SUBJECT_ID]


                print 'id:'+subject_id
                #print 'summary'+summary+'end'

                doc = Document()
                #fields which should not be analyzed
                doc.add(FloatField("rating_average",float(row[RATING_AVERAGE]),Field.Store.NO))
                doc.add(FloatField("rating_stars", float(row[RATING_STARS]), Field.Store.NO))
                doc.add(IntField("reviews_count", int(row[REVIEWS_COUNT]), Field.Store.NO))
                #doc.add(FloatField("year", float(row[YEAR]), Field.Store.NO))
                doc.add(IntField("collect_count", int(row[COLLECT_COUNT]), Field.Store.NO))
                doc.add(IntField("subject_id", int(subject_id), Field.Store.YES))
                doc.add(IntField("comments_count", int(row[COMMENTS_COUNT]), Field.Store.NO))
                doc.add(IntField("ratings_count", int(row[RATINGS_COUNT]), Field.Store.NO))
                doc.add(Field("image_small", row[IMAGE_SMALL], t1))

                #fields which should be analyzed with WhitespaceAnalyzer
                doc.add(Field("countries", row[COUNTRIES].replace(delim,' '), t3))
                doc.add(Field("casts",     row[CASTS].replace(delim,' '),     t3))
                doc.add(Field("genres",    row[GENRES].replace(delim,' '),    t3))
                doc.add(Field("subtype",   row[SUBTYPE].replace(delim,' '),   t2))
                doc.add(Field("directors", row[DIRECTORS].replace(delim,' '), t3))

                user_tags_str = ''
                others_like_str = ''
                
                # print 'user_tags'+row[USER_TAGS]
                # print 'others_like'+row[OTHERS_LIKE]
                
                if row[USER_TAGS]!='':
                    for tag_pair in row[USER_TAGS].split(delim):
                        if tag_pair!='':  # the string ends with the delimiter, so split() leaves an empty final element
                            user_tags_str = user_tags_str +' '+tag_pair.split(delim_uo)[0]
                if row[OTHERS_LIKE]!='':
                    for like_pair in row[OTHERS_LIKE].split(delim):
                        if like_pair!='':
                            others_like_str = others_like_str +' '+like_pair.split(delim_uo)[1]

                # print user_tags_str
                # print others_like_str


                doc.add(Field("user_tags", user_tags_str, t3))
                doc.add(Field("others_like", others_like_str, t3))

                #fields which should be analyzed with good analyzer
                doc.add(Field("title", row[TITLE], t3))                
                doc.add(Field("original_title", row[ORIGINAL_TITLE], t2))
                doc.add(Field("summary_segmentation", row[SUMMARY_SEGMENTATION], t2))
                doc.add(Field("aka", row[AKA], t2))

                if len(summary) > 0:
                    print subject_id + '--->:\n    ' + row[TITLE]
                    try:
                        summary_unicoded = unicode(summary, 'utf-8')  # probe that the bytes decode as UTF-8
                    except Exception,e:
                        print "Decode Failed: ", e
                    doc.add(Field('summary', summary, t2))
                else:
                    print "warning:\n" + subject_id +'---> No content!'
                writer.addDocument(doc)
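
Because indexTable() stores the counters as IntField/FloatField values, they can be filtered with numeric range queries at search time. A hedged sketch: the bounds are illustrative, and searcher is assumed to be an IndexSearcher already opened on this index.

from java.lang import Float
from org.apache.lucene.search import NumericRangeQuery

# match movies whose rating_average lies in [4.0, 5.0], bounds inclusive
query = NumericRangeQuery.newFloatRange("rating_average",
                                        Float(4.0), Float(5.0), True, True)
for scoreDoc in searcher.search(query, 10).scoreDocs:  # searcher: assumed open IndexSearcher
    print searcher.doc(scoreDoc.doc).get("subject_id")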