class MyPythonEnglishAnalyzer(PythonEnglishAnalyzer):
    """
    Class of our custom analyzer that uses filters:
        -StandardTokenizer.
        -EnglishPossessiveFilter.
        -LowerCaseFilter.
        -DiacriticFilter.
        -StopFilter.
        -SetKeywordMarkerFilter
    """

    ENGLISH_STOP_WORDS_SET = CharArraySet.unmodifiableSet(CharArraySet(Arrays.asList(
        ["a", "an", "and", "are", "as", "at", "be", "but", "by", "for", "if", "in", "into", "is", "it", "no", "not",
         "of", "on", "or", "such", "that", "the", "their", "then", "there", "these", "they", "this", "to", "was",
         "will", "with"]), False))

    def __init__(self, stopwords=ENGLISH_STOP_WORDS_SET, stemExclusionSet=CharArraySet.EMPTY_SET):
        super().__init__(stopwords)
        self.stopwords = stopwords
        self.stemExclusionSet = stemExclusionSet

    def createComponents(self, fieldName):
        source = StandardTokenizer()
        result = EnglishPossessiveFilter(source)
        result = LowerCaseFilter(result)
        result = DiacriticFilter(result)
        result = StopFilter(result, self.stopwords)
        if not self.stemExclusionSet.isEmpty():
            result = SetKeywordMarkerFilter(result, self.stemExclusionSet)
        result = PorterStemFilter(result)
        return Analyzer.TokenStreamComponents(source, result)

    def normalize(self, fieldName, input):
        return LowerCaseFilter(input)
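
A quick smoke test for the analyzer above (a minimal sketch, assuming lucene.initVM() has already been called and the filter classes are importable; the field name and sample text are illustrative):

from org.apache.lucene.analysis.tokenattributes import CharTermAttribute

analyzer = MyPythonEnglishAnalyzer()
stream = analyzer.tokenStream("content_section", "The Hitchhiker's Guide")
term = stream.addAttribute(CharTermAttribute.class_)
stream.reset()
while stream.incrementToken():
    print(term.toString())  # expect possessives stripped, stop words dropped, stems applied
stream.close()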
Example #2
    def create_index(self, index_folder):
        os.mkdir(index_folder)

        self.t1 = FieldType()
        self.t1.setStored(True)
        self.t1.setIndexOptions(IndexOptions.DOCS)

        self.t2 = FieldType()
        self.t2.setStored(True)
        self.t2.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS)

        self.t3 = FieldType()
        self.t3.setStored(True)
        self.t3.setIndexOptions(IndexOptions.NONE)

        fsDir = MMapDirectory(Paths.get(index_folder))
        writerConfig = IndexWriterConfig(
            MySimpleAnalyzer(
                CharArraySet(collections.JavaSet(utils.STOPWORDS), True)))
        writerConfig.setSimilarity(MyTFIDFSimilarity())
        writerConfig.setRAMBufferSizeMB(16384.0)  # 16 GB RAM buffer
        self.writer = IndexWriter(fsDir, writerConfig)
        logger.info(f"{self.writer.numDocs()} docs in index")
        logger.info("Indexing documents...")

        doc_ids = self.doc_db.get_doc_ids()
        for doc_id in tqdm(doc_ids, total=len(doc_ids)):
            text = self.doc_db.get_doc_text(doc_id)
            tokens = self.doc_db.get_doc_tokens(doc_id)
            self.add_doc(doc_id, text, tokens)

        logger.info(f"Indexed {self.writer.numDocs()} docs.")
        self.writer.forceMerge(1)  # to increase search performance
        self.writer.close()
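
The add_doc helper called above is not part of this snippet; a plausible sketch, assuming Document and Field are imported from org.apache.lucene.document and reusing the t1/t2/t3 field types built in create_index (the field names are assumptions):

    def add_doc(self, doc_id, text, tokens):
        # Hypothetical helper: the field names and the joined-token format
        # are illustrative, not taken from the original code.
        doc = Document()
        doc.add(Field("id", doc_id, self.t1))                # stored, indexed as DOCS
        doc.add(Field("text", text, self.t2))                # stored, full postings
        doc.add(Field("tokens", " ".join(tokens), self.t3))  # stored only, not indexed
        self.writer.addDocument(doc)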
Example #3
    def __init__(self, args):

        self.env = lucene.initVM(initialheap='28g',
                                 maxheap='28g',
                                 vmargs=['-Djava.awt.headless=true'])
        self.args = args

        index_folder = os.path.join(DATA_DIR, args.index_folder)
        if not os.path.exists(index_folder):
            self.doc_db = DocDB()
            logger.info(f'Creating index at {index_folder}')
            self.create_index(index_folder)

        fsDir = MMapDirectory(Paths.get(index_folder))
        self.searcher = IndexSearcher(DirectoryReader.open(fsDir))
        self.searcher.setSimilarity(MyTFIDFSimilarity())
        self.analyzer = MySimpleAnalyzer(
            CharArraySet(collections.JavaSet(utils.STOPWORDS), True))
        self.pool = ThreadPool(processes=args.num_search_workers)
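
This constructor only sets up the searcher and thread pool; a sketch of a matching search method, assuming QueryParser is imported from org.apache.lucene.queryparser.classic and the indexed field is named "text" (both assumptions):

    def search(self, query_text, topk=10):
        # Worker threads spawned by the ThreadPool must attach to the JVM
        # before making any Lucene call.
        self.env.attachCurrentThread()
        query = QueryParser("text", self.analyzer).parse(
            QueryParser.escape(query_text))
        hits = self.searcher.search(query, topk).scoreDocs
        return [(self.searcher.doc(h.doc), h.score) for h in hits]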
Example #4
class Indexer:
    """
        Class which will define our indexer which contains
        the methods of indexing documents.
    """
    ENGLISH_STOP_WORDS_SET = CharArraySet.unmodifiableSet(CharArraySet(Arrays.asList(
        ["a", "a's", "able", "about", "above", "according", "accordingly", "across", "actually", "after",
         "afterwards", "again", "against", "ain't", "all", "allow", "allows", "almost", "alone", "along", "already",
         "also", "although", "always", "am", "among", "amongst", "an", "and", "another", "any", "anybody", "anyhow",
         "anyone", "anything", "anyway", "anyways", "anywhere", "apart", "appear", "appreciate", "appropriate",
         "are", "aren't", "around", "as", "aside", "ask", "asking", "associated", "at", "available", "away",
         "awfully", "b", "be", "became", "because", "become", "becomes", "becoming", "been", "before", "beforehand",
         "behind", "being", "believe", "below", "beside", "besides", "best", "better", "between", "beyond", "both",
         "brief", "but", "by", "c", "c'mon", "c's", "came", "can", "can't", "cannot", "cant", "cause", "causes",
         "certain", "certainly", "changes", "clearly", "co", "com", "come", "comes", "concerning", "consequently",
         "consider", "considering", "contain", "containing", "contains", "corresponding", "could", "couldn't",
         "course", "currently", "d", "definitely", "described", "despite", "did", "didn't", "different", "do",
         "does", "doesn't", "doing", "don't", "done", "down", "downwards", "during", "e", "each", "edu", "eg",
         "eight", "either", "else", "elsewhere", "enough", "entirely", "especially", "et", "etc", "even", "ever",
         "every", "everybody", "everyone", "everything", "everywhere", "ex", "exactly", "example", "except", "f",
         "far", "few", "fifth", "first", "five", "followed", "following", "follows", "for", "former", "formerly",
         "forth", "four", "from", "further", "furthermore", "g", "get", "gets", "getting", "given", "gives", "go",
         "goes", "going", "gone", "got", "gotten", "greetings", "h", "had", "hadn't", "happens", "hardly", "has",
         "hasn't", "have", "haven't", "having", "he", "he's", "hello", "help", "hence", "her", "here", "here's",
         "hereafter", "hereby", "herein", "hereupon", "hers", "herself", "hi", "him", "himself", "his", "hither",
         "hopefully", "how", "howbeit", "however", "i", "i'd", "i'll", "i'm", "i've", "ie", "if", "ignored",
         "immediate", "in", "inasmuch", "inc", "indeed", "indicate", "indicated", "indicates", "inner", "insofar",
         "instead", "into", "inward", "is", "isn't", "it", "it'd", "it'll", "it's", "its", "itself", "j", "just",
         "k", "keep", "keeps", "kept", "know", "knows", "known", "l", "last", "lately", "later", "latter",
         "latterly", "least", "less", "lest", "let", "let's", "like", "liked", "likely", "little", "look",
         "looking", "looks", "ltd", "m", "mainly", "many", "may", "maybe", "me", "mean", "meanwhile", "merely",
         "might", "more", "moreover", "most", "mostly", "much", "must", "my", "myself", "n", "name", "namely", "nd",
         "near", "nearly", "necessary", "need", "needs", "neither", "never", "nevertheless", "new", "next", "nine",
         "no", "nobody", "non", "none", "noone", "nor", "normally", "not", "nothing", "novel", "now", "nowhere",
         "o", "obviously", "of", "off", "often", "oh", "ok", "okay", "old", "on", "once", "one", "ones", "only",
         "onto", "or", "other", "others", "otherwise", "ought", "our", "ours", "ourselves", "out", "outside",
         "over", "overall", "own", "p", "particular", "particularly", "per", "perhaps", "placed", "please", "plus",
         "possible", "presumably", "probably", "provides", "q", "que", "quite", "qv", "r", "rather", "rd", "re",
         "really", "reasonably", "regarding", "regardless", "regards", "relatively", "respectively", "right", "s",
         "said", "same", "saw", "say", "saying", "says", "second", "secondly", "see", "seeing", "seem", "seemed",
         "seeming", "seems", "seen", "self", "selves", "sensible", "sent", "serious", "seriously", "seven",
         "several", "shall", "she", "should", "shouldn't", "since", "six", "so", "some", "somebody", "somehow",
         "someone", "something", "sometime", "sometimes", "somewhat", "somewhere", "soon", "sorry", "specified",
         "specify", "specifying", "still", "sub", "such", "sup", "sure", "t", "t's", "take", "taken", "tell",
         "tends", "th", "than", "thank", "thanks", "thanx", "that", "that's", "thats", "the", "their", "theirs",
         "them", "themselves", "then", "thence", "there", "there's", "thereafter", "thereby", "therefore",
         "therein", "theres", "thereupon", "these", "they", "they'd", "they'll", "they're", "they've", "think",
         "third", "this", "thorough", "thoroughly", "those", "though", "three", "through", "throughout", "thru",
         "thus", "to", "together", "too", "took", "toward", "towards", "tried", "tries", "truly", "try", "trying",
         "twice", "two", "u", "un", "under", "unfortunately", "unless", "unlikely", "until", "unto", "up", "upon",
         "us", "use", "used", "useful", "uses", "using", "usually", "uucp", "v", "value", "various", "very", "via",
         "viz", "vs", "w", "want", "wants", "was", "wasn't", "way", "we", "we'd", "we'll", "we're", "we've",
         "welcome", "well", "went", "were", "weren't", "what", "what's", "whatever", "when", "whence", "whenever",
         "where", "where's", "whereafter", "whereas", "whereby", "wherein", "whereupon", "wherever", "whether",
         "which", "while", "whither", "who", "who's", "whoever", "whole", "whom", "whose", "why", "will", "willing",
         "wish", "with", "within", "without", "won't", "wonder", "would", "would", "wouldn't", "x", "y", "yes",
         "yet", "you", "you'd", "you'll", "you're", "you've", "your", "yours"]), False))

    def __init__(self, index_dir):
        """

        :param index_dir: the dir where to store the index.
        """
        self.indexDir = index_dir
        if not os.path.exists(index_dir):
            os.mkdir(index_dir)
        self.analyzer = MyPythonEnglishAnalyzer(
            stopwords=self.ENGLISH_STOP_WORDS_SET)
        conf = IndexWriterConfig(self.analyzer)
        conf.setUseCompoundFile(False)
        directory = FSDirectory.open(Paths.get(index_dir))
        self.writer = IndexWriter(directory, conf)

    def index_folder(self, folder2index):
        """
        :param folder2index: the folder to be indexed.
        :return:
        """
        # Browse all the files from root and store the paths
        files = glob.glob(os.path.join(folder2index, '**', '*.xml'),
                          recursive=True)
        num_lines = len(files)
        print('\n==> Start processing....\n')
        # Iterate in the files paths list
        with tqdm(total=num_lines) as pbar:
            for file in files:
                pbar.update(1)
                doc = WikiDocument(file)   # parses the Wikipedia page
                self.index_document(doc)   # indexes its sections
        print("\n==> Please wait ...\n")
        self.writer.commit()
        print('A total of ' + str(self.writer.getDocStats().numDocs) +
              ' documents have been indexed.')
        self.close()

    def index_document(self, wiki_doc):
        """
        :param wiki_doc: the document to be indexed.
        :return:
        """
        # Method that indexes documents
        for i, section in enumerate(wiki_doc.sections):
            doc = Document()
            doc.add(StringField("id_article", wiki_doc.id, Field.Store.YES))
            doc.add(TextField("title_article", wiki_doc.title, Field.Store.YES))
            doc.add(StringField("id_section",
                                str(wiki_doc.id) + "_" + str(i), Field.Store.YES))
            doc.add(TextField("title_section", section.title, Field.Store.YES))
            doc.add(TextField("content_section", section.text, Field.Store.YES))
            self.writer.addDocument(doc)

    def close(self):
        # close the index
        self.writer.close()
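
A minimal driver for this class (the directory names are illustrative, and lucene.initVM() must run before any Lucene call):

if __name__ == '__main__':
    lucene.initVM(vmargs=['-Djava.awt.headless=true'])
    indexer = Indexer('wiki_index')
    indexer.index_folder('wikipedia_dump/')  # indexes every .xml file, commits, and closes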
Example #5
from org.apache.lucene.index import Term
from org.apache.lucene.search import TermQuery, BooleanQuery, BooleanClause
from org.apache.lucene.store import SimpleFSDirectory
from org.apache.lucene.util import Version, BytesRef
from org.apache.lucene.analysis import CharArraySet

if __name__ == '__main__':
    # http://svn.apache.org/viewvc/lucene/pylucene/trunk/samples/IndexFiles.py?view=markup
    json_file = sys.argv[1]
    index_folder = sys.argv[2]

    glog.setLevel(glog.INFO)
    lucene.initVM()
    store = SimpleFSDirectory(Paths.get(index_folder))
    stop_words = CharArraySet(50, True)
    c_analyzer = ClassicAnalyzer(stop_words)
    analyzer = LimitTokenCountAnalyzer(c_analyzer, 1048576)
    config = IndexWriterConfig(analyzer)
    config.setOpenMode(IndexWriterConfig.OpenMode.CREATE_OR_APPEND)
    writer = IndexWriter(store, config)

    print('%d docs in index' % writer.numDocs())
    print('Indexing json files...')

    # For text field.
    t1 = FieldType()
    t1.setStored(False)
    t1.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS)

    with codecs.open(json_file, encoding='utf8') as f:
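        # The original snippet is truncated here. A plausible continuation,
        # assuming one JSON object per line with a 'text' field and that
        # `import json` appears at the top of the script:
        for line in f:
            entry = json.loads(line)
            doc = Document()
            doc.add(Field('text', entry['text'], t1))
            writer.addDocument(doc)

    writer.commit()
    writer.close()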
Example #6
def main(index_dir, input_dir):
    """Creates a Lucene Index, and indexes every .json file it finds.
    It utilizes a stopwords.txt to filter out stop words"""
    lucene.initVM()

    logger.info("Loading stop words from stopwords.txt")
    stopwords = set()
    with open('stopwords.txt', 'r') as f:
        for line in f:
            stopwords.add(line.strip())
    logger.debug('Stop words: %s' % str(stopwords))

    temp = CharArraySet(len(stopwords), True)
    for stopword in stopwords:
        temp.add(stopword)
    stopwords = temp

    # Create index
    logger.info("Creating Lucene index [%s]..." % index_dir)

    fs_dir = SimpleFSDirectory(Paths.get(index_dir))
    analyzer = StandardAnalyzer(stopwords)
    writerConfig = IndexWriterConfig(analyzer)
    writer = IndexWriter(fs_dir, writerConfig)

    logger.info("Currently there are %d documents in the index..." %
                writer.numDocs())

    # Index documents
    onlyfiles = [
        f for f in listdir(input_dir)
        if isfile(join(input_dir, f)) and f.endswith('.json')
    ]
    for f in onlyfiles:
        try:
            journal_code = f.split('.')[0]
            f = join(input_dir, f)
            json_data = open(f)
            data = json.load(json_data)
            for entry in data:
                doc = Document()
                doc.add(StringField("journal", journal_code, Field.Store.YES))
                doc.add(StringField("url", entry['url'], Field.Store.YES))
                doc.add(StringField("date", entry['date'], Field.Store.YES))
                doc.add(TextField("title", entry['title'], Field.Store.YES))
                writer.addDocument(doc)
            json_data.close()
        except IOError as v:
            # IOError/OSError exposes errno and strerror directly in Python 3.
            logger.error("I/O Error: %s (%s)" % (v.strerror, v.errno))
    logger.info("Indexed lines from stdin (%d documents in index)" %
                writer.numDocs())

    # Wrap it up. To merge segments for faster searches, use forceMerge
    # (IndexWriter.optimize() was removed in Lucene 4):
    # writer.forceMerge(1)

    logger.info("Closing index of %d documents..." % writer.numDocs())
    writer.close()

    reader = DirectoryReader.open(fs_dir)
    with open('all.csv', 'w') as csvfile:
        csvwriter = csv.writer(csvfile,
                               delimiter=',',
                               quotechar='"',
                               quoting=csv.QUOTE_ALL)
        for i in range(0, reader.numDocs()):
            doc = reader.document(i)
            csvwriter.writerow([
                doc.get('journal'),
                doc.get('date'),
                doc.get('url'),
                # raw string: '\,' would be an invalid escape sequence
                doc.get('title').strip().replace(',', r'\,')
            ])
    reader.close()
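
For completeness, a minimal entry point, assuming the script takes the index and input directories on the command line:

if __name__ == '__main__':
    import sys
    main(sys.argv[1], sys.argv[2])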