Example 1
    def readCorpusList(self):
        """Read the chunked cookbook corpus and print each chunk's leaves.

        ``chunked_words()`` yields a mix of ``Tree`` chunks and bare
        ``(word, tag)`` tuples; only trees have ``leaves()``, so tuples
        are printed as-is.
        """
        reader = ChunkedCorpusReader('C:/nltk_data/corpora/cookbook',
                                     r'.*\.chunk')

        for chunk in reader.chunked_words():
            try:
                print(chunk.leaves())
            except AttributeError:
                # chunk is a plain (word, tag) tuple, not a Tree —
                # the original bare `except:` also hid real errors.
                print(chunk)
Example 2
                            tagset='en-brown')
# Tagged corpus reader over the Brown-style .pos file, splitting tokens
# on spaces instead of the default tokenizer.
reader1 = TaggedCorpusReader('/Users/atul/nltk_data',
                             r'brown.pos',
                             word_tokenizer=SpaceTokenizer())

# NOTE(review): `reader` below is presumably a TaggedCorpusReader created
# just above this snippet (its definition is truncated here) — it is NOT
# `reader1`; confirm against the full source.
print(reader.words())
print(reader.sents())
print(reader.tagged_words())
print(reader.tagged_sents())
print(
    reader.tagged_words(tagset='universal')
)  ## Mapping tags to universal format, if tagset is not correct every TAG will have UNK

## Reading chunk corpora #######
reader = ChunkedCorpusReader(
    '/Users/atul/nltk_data', r'treebank.chunk', tagset='en-brown'
)
# Dump the chunk structure at word, sentence, and paragraph level (in order).
for accessor in (reader.chunked_words, reader.chunked_sents, reader.chunked_paras):
    print(accessor())

## Reading classified corpora ##################
## Category labels come from cat_pattern (file name), cat_dict, or cat_file.
from nltk.corpus.reader import CategorizedPlaintextCorpusReader

# cat_pattern is the simplest option: the category is the capture group
# extracted from each matching file name.
reader = CategorizedPlaintextCorpusReader('/Users/atul/nltk_data',
                                          r'movie_.*\.txt',
                                          cat_pattern=r'movie_(\w+)\.txt')
reader.categories()
# List the files belonging to each category.
for label in ('neg', 'pos'):
    reader.fileids(categories=[label])
Example 3
from nltk.corpus.reader import ChunkedCorpusReader
from nltk.tokenize import SpaceTokenizer
import nltk

# Locate the cookbook corpus directory on the NLTK data path.
corpus_dir = nltk.data.find('corpora/cookbook')
reader = ChunkedCorpusReader(corpus_dir, r'.*\.chunk')

# Chunk structure at word, sentence, and paragraph granularity.
for accessor in (reader.chunked_words, reader.chunked_sents, reader.chunked_paras):
    print(accessor())

# reader.chunked_sents()[0].draw()  # opens a Tk window; kept disabled
first_sentence = reader.chunked_sents()[0]
print(first_sentence.leaves())