def prepare_pos_features(Language_model_set, output_file):
    corpus_root = '/home1/c/cis530/data-hw2/' + Language_model_set
    texts = PlaintextCorpusReader(corpus_root, '.*')
    text = texts.words()
    tagged_text = nltk.pos_tag(text)
    merged_tag_text = mergeTags(tagged_text)
    lists = seperate_pos(merged_tag_text)
    # most_common() returns (word, count) pairs ordered by frequency;
    # FreqDist.keys() is not frequency-ordered in NLTK 3
    nouns_dist = FreqDist(lists[0])
    top_nouns = [w for w, _ in nouns_dist.most_common(200)]
    verbs_dist = FreqDist(lists[1])
    top_verbs = [w for w, _ in verbs_dist.most_common(200)]
    advs_dist = FreqDist(lists[2])
    top_advs = [w for w, _ in advs_dist.most_common(100)]
    prep_dist = FreqDist(lists[3])
    top_preps = [w for w, _ in prep_dist.most_common(100)]
    adjs_dist = FreqDist(lists[4])
    top_adjs = [w for w, _ in adjs_dist.most_common(200)]


    out = open(output_file, 'w')

    for n in top_nouns:
        out.write('NN'+ n + '\n')
    for v in top_verbs:
        out.write('VV'+ v + '\n')
    for av in top_advs:
        out.write('ADV'+ av + '\n')
    for p in top_preps:
        out.write('PREP'+ p + '\n')
    for aj in top_adjs:
        out.write('ADJ'+ aj + '\n')
    out.close()
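A minimal usage sketch (the subfolder and output path are hypothetical; mergeTags and seperate_pos are helpers defined elsewhere in this assignment):

prepare_pos_features('Language_model_set', 'pos_feature_set.txt')
# each output line glues a coarse tag prefix onto a word, e.g. "NNmarket" or
# "VVbuy"; get_feature_set_tuples (Example #34) splits these back into
# (word, tag) pairs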
Example #2
	def fileids(self, years='*'):
		"""
			Return a list of all files, or only the files under the given year folder(s)
			
			>>> len(hr.fileids())
			3206
			>>> len(hr.fileids(years=1996))
			157
			>>> len(hr.fileids(years=[1996,2007]))
			246
			>>> hr.fileids()[0]
			'1996/HAM2-960622.xml'
		"""
		if type(years) is int:
			years = [str(years)]
		
		if years=='*':
			wordlists = PlaintextCorpusReader(self.hamshahri_root, '.*\.xml')
			fids = wordlists.fileids()
			return fids
		else:
			fids = []
			for year in years:
				wordlists = PlaintextCorpusReader(self.hamshahri_root, str(year) + '/.*\.xml')
				fids = fids + wordlists.fileids()
			return fids
def get_coarse_level_features(dataset, output_file):
	# Import the corpus reader
	corpus_root = '/home1/c/cis530/data-hw2/'+dataset
	# Define the folder where the files are situated
	files_dataset = PlaintextCorpusReader(corpus_root, '.*')
	# Open the output_file
	output = open('/home1/c/cis530/data-hw2/'+output_file,'w')
	# Read the stopwlist
	stop_list = open('/home1/c/cis530/data-hw2/'+'stopwlist.txt').read()
	types_stop_list=stop_list.split()
	for fileid in files_dataset.fileids():
		# Output the docid
		output.write(dataset+'/'+fileid+' ')
		# Output the topic_name
		topic_name=fileid.split('/')[0]	
		output.write(topic_name+' ')
		# Output the num_tokens	
		tokens=files_dataset.words(fileid)
		output.write('tok:'+str(len(tokens))+' ')
		# Output the num_types
		types=set(tokens)
		output.write('typ:'+str(len(types))+' ')
		# Output the num_contents
		output.write('con:'+str(len([w for w in tokens if w not in types_stop_list]))+' ')
		# Output the num_sents
		sents = files_dataset.sents(fileid)
		output.write('sen:'+str(len(sents))+' ')
		# Output the avg_slen
		avg_slen=round(float(len(tokens))/float(len(sents)),2)
		output.write('len:'+str(avg_slen)+' ')
		# Output the num_caps
		output.write('cap:'+str(len([w for w in tokens if w[0]>='A' and w[0]<='Z'])))
		output.write('\n')
	output.close()
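Each document yields one space-separated feature line; an illustrative line (values are made up) looks like:

Language_model_set/Finance/doc01.txt Finance tok:523 typ:210 con:301 sen:25 len:20.92 cap:48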
Example #4
def setData(domain):
	
	# domain variable can take one of the following values
	#
	# "chicago_crime_data",
	# "economics",
	# "software_vulnerability",
	# "cyber_threat",
	# "articles",
	# "msds"

	
	corpus_root = getRoot(domain)					# based on the selected domain, corpus_root holds the relative address of the corpus
	wordlists = PlaintextCorpusReader(corpus_root, '.*')		# NLTK's PlaintextCorpusReader loads the text files in the root
	words = wordlists.words()					# and extracts all the words in each file

	my_stopwords = nltk.corpus.stopwords.words('english')		# my_stopwords holds a list of non-relevant (stop) words in English
	content = [w for w in words if w.lower() not in my_stopwords]	# stop words are removed
	content = [w for w in content if len(w) > 2]			# words shorter than three (3) characters are removed
	content = [w for w in content if not w.isdigit()]		# digit-only words (e.g. "10", "30", "450") are removed

	result = {}

	# a list of related words is created for each word in the content variable

	for word in content:
		result[word] = []
		for sset in wn.synsets(word):				# every synset of the word is considered; limiting this to the first synset would be faster but less accurate
			for synset in sset.hyponyms():			# a set of hyponyms is collected for each synset
				result[word].append(synset.name().split('.')[0])	# Synset.name() is a method in NLTK 3

	return result, content						# both the hyponym map and the filtered word list are returned
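A short usage sketch (the domain string is hypothetical):

related, content = setData('economics')
for word in content[:5]:
    print(word, related[word][:3])   # a word with a few of its hyponym names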
 def plot_cfreq(self,corpus,patt,n):
     wordlists = PlaintextCorpusReader(corpus,patt)
     # accumulate counts across every file instead of keeping only the last one
     fre = FreqDist()
     for id in wordlists.fileids():
         words = wordlists.words(id)
         fre.update(word.lower() for word in words if word.isalpha())
     return fre.plot(n,cumulative=True)
Example #6
def tokenisation (path):
    tokens = []
    min_length = 3
    for dirs in os.walk(path):
        corpus_root = dirs[0]  # walk the directory tree under path
        if corpus_root != path:
            textlist = PlaintextCorpusReader(corpus_root,'.*')
            for files in textlist.fileids():
                test = corpus_root + '/' + files
                fs = open(test,'r')
                texte = fs.read()  # read the whole file as a single string
                words = [word.lower() for word in wordpunct_tokenize(texte)]
                for word in words:
                    if word not in cachedStopWords:
                        tokens.append(word)
                fs.close()
    p = re.compile('[a-zA-Z]+')
    tokens_filtered = [token for token in tokens
                       if p.match(token) and len(token) >= min_length]

#    vocab = []
#    for words in tokens_filtered:
#        vocab.append(SnowballStemmer("english").stem(words))

#    tokens_filtered_sans = set(vocab)
    tokens_filtered_sans = list(set(tokens_filtered))

    return tokens_filtered_sans
Example #7
def similar (text, word):
    if re.match ("^[a-zA-Z0-9_\(\),\.]+$", text) and re.match ("^[a-zA-Z0-9_]+$", word):
        text = '%s.txt' % text
        
        f = open(os.path.join(CORPUS_ROOT, text), 'r')
        source = f.read()
        f.close()
        
        corpus = PlaintextCorpusReader(CORPUS_ROOT, [text])
        n_text = nltk.text.Text(corpus.words(text))
        context_index = nltk.text.ContextIndex(n_text.tokens, filter=lambda x:x.isalpha(), key=lambda s:s.lower())
        word = word.lower()
        wci = context_index._word_to_contexts
        result = []
        
        if word in wci.conditions():
            contexts = set(wci[word])
            fd = nltk.probability.FreqDist(w for w in wci.conditions() for c in wci[w] if c in contexts and not w == word)
            words = nltk.util.tokenwrap(w for w, _ in fd.most_common(20))
            
            for middle_word in words.split(' '):
                for context in contexts:
                    # case-insensitive search for "context_left ... middle_word ... context_right"
                    pattern = re.escape(context[0]) + r"(\W|\s)+" + re.escape(middle_word) + r"(\W|\s)+" + re.escape(context[1])
                    if re.search(pattern, source, re.IGNORECASE) is not None:
                        print (context[0], middle_word, context[1])
                        result.append ({'word': word, 'context_left': context[0], 'context_right': context[1]})
            
        return dumps ({'name': text, 'word': word, 'result': result})
 def tokenize_report_sents(self, report_of_the_time):
     env = ReportEnviroments()  # 'env', not 're': don't shadow the regex module
     new_corpus_reports_fileids_list = PlaintextCorpusReader(env.original_reports_corpus_path, '.*')
     raw_text = new_corpus_reports_fileids_list.raw(report_of_the_time)
     sentencas_raw = sent_tokenize(raw_text)
     original_report_path = str(new_corpus_reports_fileids_list.abspath(report_of_the_time))
     return sentencas_raw, original_report_path, report_of_the_time
Example #9
    def extractWordsOnly(self, article):
        articlename = article + '.txt'
        # corpus_root is assumed to be defined at module level, e.g.:
        # corpus_root = '/home/jesal/onedump/'
        wl = PlaintextCorpusReader(corpus_root, '.*')
        allwords = wl.words(fileids = articlename)
        exturllist = self.extractexternalURL(article)
        textstring = wl.raw(articlename)
        for item in exturllist:
            textstring = textstring.replace(item, ' ')

        #templist = re.sub(r'[.!,;?]', ' ', textstring).split()
        templist = nltk.word_tokenize(textstring)
        listtemp = []
        for i in templist:
            # keep only the alphabetic core of each token
            j = re.sub('[^A-Za-z]+', '', i)
            listtemp.append(str(j))

        templistfinal = self.removeEmpty(listtemp)
        return templistfinal
Example #10
def extractParasInList(name):
    corpuslocation = '/Users/anis/seniorProject/aligned Paragraphs/algebra'
    reader = PlaintextCorpusReader(corpuslocation, '.*\.txt')
    # reader.paras() returns a list of paragraphs; each paragraph is a list of
    # sentences, and each sentence is a list of words, so it is a list of
    # lists of lists.
    pList = []
    paragraphlist = reader.paras(name)  # e.g. 'simpleTuring.txt'
    for sentlist in paragraphlist:
        # flatten the paragraph's sentences into a single word list
        paraAsAList = []
        for sent in sentlist:
            paraAsAList = paraAsAList + sent
        # join the words back into one string per paragraph
        paraAsAString = " ".join(paraAsAList)
        pList.append(paraAsAString)
    return pList
 def extract_related_terms(self):
     env = ReportEnviroments()
     new_corpus_clusters_fileids_list = PlaintextCorpusReader(env.cluster_corpus_path, '.*')
     raw_text_list = []
     for fileid in new_corpus_clusters_fileids_list.fileids():
         # one single-element list of raw text per cluster file
         raw_text_list.append([new_corpus_clusters_fileids_list.raw(fileids=fileid)])
     return raw_text_list
def corpus_from_directory(path, filetype='.*'):
	'''
	Make a corpus of all files in a given directory. Can limit type by passing
	the desired extension, proper format is, e.g., '.*\.txt'
	'''
	corpus_reader = PlaintextCorpusReader(path, filetype)
	return nltk.Text( corpus_reader.words() )
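Usage is a one-liner; the path and pattern below are hypothetical:

text = corpus_from_directory('corpus/', '.*\.txt')
text.concordance('language')   # nltk.Text adds concordance, collocations, etc.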
def get_lm_features(dataset,output_file):
	# Import the corpus reader
	corpus_root = '/home1/c/cis530/data-hw2/'+dataset
	# Define the folder where the files are situated
	files_dataset = PlaintextCorpusReader(corpus_root, '.*')
	# Build one bigram model per topic
	fin_model = BigramModel('Finance',corpus_root)
	hel_model = BigramModel('Health',corpus_root)
	res_model = BigramModel('Research',corpus_root)
	co_model = BigramModel('Computers_and_the_Internet',corpus_root)
	output = open('/home1/c/cis530/data-hw2/'+output_file,'w')
	for fileid in files_dataset.fileids():
		# Output the docid
		output.write(dataset+'/'+fileid+' ')
		# Output the topic_name
		topic_name=fileid.split('/')[0]
		output.write(topic_name+' ')
		word_list = files_dataset.words(fileid)
		finprob,finper = fin_model.get_prob_and_per(word_list)
		hlprob,hlper = hel_model.get_prob_and_per(word_list)
		resprob,resper = res_model.get_prob_and_per(word_list)
		coprob,coper = co_model.get_prob_and_per(word_list)
		output.write('finprob:'+str(round(finprob,1))+' ')
		output.write('hlprob:'+str(round(hlprob,1))+' ')
		output.write('resprob:'+str(round(resprob,1))+' ')
		output.write('coprob:'+str(round(coprob,1))+' ')
		output.write('finper:'+str(round(finper,1))+' ')
		output.write('hlper:'+str(round(hlper,1))+' ')
		output.write('resper:'+str(round(resper,1))+' ')
		output.write('coper:'+str(round(coper,1))+' ')
		output.write('\n')
	output.close()
Example #14
class Document(object):
    """
    A container object for a set of chapters.

    This allows us to keep track of document frequencies when computing them the
    first time so we don't repeat computations for common words. It also handles
    the PlaintextCorpusReader functions for us.
    """

    def __init__(self, chapter_paths):
        """
        Create a new Document.

        chapter_paths - A list of the paths for chapters in the document.
        """
        self.corpus = PlaintextCorpusReader("", chapter_paths)
        self.chapter_lists = self._sanitize_chapters()
        self.chapter_dists = [(FreqDist(chapter), chapter) for chapter in
                self.chapter_lists]
        self.words = {}

    def get_chapters(self):
        return self.chapter_lists

    def average_chapter_frequency(self, word):
        freqs = []
        if word in self.words:
            return self.words[word]
        else:
            for (dist, wordlist) in self.chapter_dists:
                freqs.append(dist[word]/float(len(wordlist)))

            # Store and return the average frequency
            avg_frq = mean(freqs)
            self.words[word] = avg_frq
            return avg_frq

    def _sanitize_chapters(self):
        # Sanitize the wordlists and return them
        lists = [self.corpus.words(file_id) for file_id in
                self.corpus.fileids()]

        new_lists = []

        for word_list in lists:
            # Convert everything to lowercase (e.g. so "the" and "The" match)
            word_list = [word.lower() for word in word_list]
            # Remove any punctuation (Python's re module has no \p{P}, so
            # strip everything that is not alphanumeric or whitespace)
            word_list = [re.sub(r'[^\w\s]', '', word) for word in word_list]
            # Remove stopwords, punctuation, and any empty word
            stops = stopwords.words('english')
            stops.append('')
            stops.append('said')
            word_list = [word for word in word_list if (word not in stops and
                word.isalpha())]

            new_lists.append(word_list)

        return new_lists
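A minimal usage sketch (the chapter paths are hypothetical; mean is assumed to come from numpy):

doc = Document(['chapters/ch01.txt', 'chapters/ch02.txt'])
print(len(doc.get_chapters()))                 # number of sanitized chapter word lists
print(doc.average_chapter_frequency('whale'))  # computed once, then served from the cache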
def main():
    
    # Corpus Location
    #for training data
    posTrainCorpus = 'C:/Users/Abhinav/Desktop/Course work/NLP/txt_sentoken/pos_train'
    negTrainCorpus = 'C:/Users/Abhinav/Desktop/Course work/NLP/txt_sentoken/neg_train'

    #for test data
    posTestCorpus = 'C:/Users/Abhinav/Desktop/Course work/NLP/txt_sentoken/pos_test'
    negTestCorpus = 'C:/Users/Abhinav/Desktop/Course work/NLP/txt_sentoken/neg_test'

    # Create Plain Text Corpus for training data
    posCorpus = PlaintextCorpusReader(posTrainCorpus, '.*')
    negCorpus = PlaintextCorpusReader(negTrainCorpus, '.*')


    # Create Plain Text Corpus for test data
    posTstCorpus = PlaintextCorpusReader(posTestCorpus, '.*')
    negTstCorpus = PlaintextCorpusReader(negTestCorpus, '.*')
    
    #GetBigrams
    posBigrams = nltk.bigrams(posCorpus.words())
    negBigrams = nltk.bigrams(negCorpus.words())

    #Get no. of words per corpus
    posWordLen = len(posCorpus.words())
    negWordLen = len(negCorpus.words())
    
    # Creating object of Lang_Model_classifier
    obj1 = Lang_Model_Classifier()
    obj1.freq_dst(posCorpus, negCorpus)
    
    #For negative test data
    for filename in os.listdir(negTestCorpus):
        wordSet = negTstCorpus.words(filename)

        print('**Unigram**')
        unigr = obj1.perp(wordSet)
        print(unigr)

        print('**Bigram**')
        bigr = obj1.perpBi(nltk.bigrams(wordSet))
        print(bigr)

    #For positive test data
    for filename in os.listdir(posTestCorpus):
        wordSet2 = posTstCorpus.words(filename)

        print('**Unigram**')
        posunigr = obj1.perp(wordSet2)
        print(posunigr)

        print('**Bigram**')
        posbigr = obj1.perpBi(nltk.bigrams(wordSet2))
        print(posbigr)
Example #16
def hybrid_cfdist():
    sherlock_corpus = PlaintextCorpusReader(CORPUS_ROOT_SHERLOCK, '.*', encoding='utf-8')
    sherlock_bigrams = list(nltk.bigrams(sherlock_corpus.words()))

    pokemon_corpus = PlaintextCorpusReader(CORPUS_ROOT_POKEMON, '.*', encoding='utf-8')
    pokemon_bigrams = list(nltk.bigrams(pokemon_corpus.words()))

    # nltk.bigrams returns a generator in NLTK 3, so materialize both before concatenating
    return nltk.ConditionalFreqDist(sherlock_bigrams + pokemon_bigrams)
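The resulting ConditionalFreqDist maps each word to a frequency distribution over its successors, which is enough for greedy bigram generation (a sketch, assuming both corpus roots are configured):

cfd = hybrid_cfdist()
word = 'the'
for _ in range(10):
    word = cfd[word].max()   # follow the most frequent successor
    print(word, end=' ')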
Example #17
def main():
    corpus_root = '../posts/'
    newcorpus = PlaintextCorpusReader(corpus_root, '.*',
                                      para_block_reader=read_block_no_metadata)
    corpus_words = [w.lower() for w in newcorpus.words() if w.isalpha()]
    corpus_sentences = newcorpus.sents()
    analyst = TextAnalyst(corpus_words, corpus_sentences, 'french')
    analyst.print_analyze()
def get_sub_directories(directory):
    files = PlaintextCorpusReader(directory, ".*")
    dirs = list()
    for f in files.fileids():
        if "/" in f:
            if (f[:f.index("/")] not in dirs):
                dirs.append(f[:f.index("/")])
    return dirs
 def raw_text_convertion(self, page1, page2, convertion_style):
     if self.doc_ext == '.pdf':
         system("pdftotext %s -enc UTF-8 -f %i -l %i %s.pdf %s.txt"
             %(convertion_style, page1, page2, self.doc_dir, self.doc_dir))
     raw_text = PlaintextCorpusReader(dirname(self.doc_dir), self.temp_text_doc).raw()
     lowertext = raw_text.lower()  # stay in str: re.sub with a str pattern fails on bytes under Python 3
     self.raw_text = re.sub(r'[0-9]', '', lowertext)
     return self.raw_text
Example #20
def get_corpus(corpusdir):
	newcorpus = PlaintextCorpusReader(corpusdir, '.*')
	titles = newcorpus.fileids() # returns all the .txt files in the dir
	words = []
	for title in titles:
		newcorpus_txt = newcorpus.words(title)
		words.extend([ e for e in newcorpus_txt if re.match(r"[a-zA-Z]",e)])
	
	return words
Example #21
def raw():
	"""
		Returns raw text of corpus
		
		>>> raw()[:54]
		'#                                                 DELM'
	"""		
	wordlists = PlaintextCorpusReader(bijankhan_root, bijankhan_fileid)
	return wordlists.raw(bijankhan_fileid)
def extractingFromFolders():
    folder2 = os.path.expanduser('~\\My Documents\\Tara\\Ongoing\\CharacterCorpus\\Reference')
    fileresult = os.path.expanduser('~\\My Documents\\Tara\\Ongoing\\CharacterCorpus\\results.txt')
    refer = PlaintextCorpusReader(folder2, 'harrygrepster.txt')
    grepster = refer.words()
    results = open(fileresult, 'a')
    completeWords = wordlist.words()
    stoppers = stopwords.words()
    return grepster, results, completeWords, stoppers
Example #23
def GetTweets():
    corpusdir = 'DB/'

    newCorpus = PlaintextCorpusReader(corpusdir, '.*\.txt$') #Regex allows you to ignore .DS_Store

    pattern = '\r\n' #Regex accepts \r\n as the next line encoding in each 'tweet' in the database
    tweets = nltk.regexp_tokenize(newCorpus.raw(), pattern, gaps=True) #iterate through list, creating 'tweets'
    tweets = [x.lower() for x in tweets] #make all strings lowercase to make matching easier
    return tweets
Example #24
def process_nan():
    corpus_root = '../nan_samples/'
    library = PlaintextCorpusReader(corpus_root, '.*', encoding='utf-8')
    tokens = nltk.word_tokenize(library.raw())
    tokens = map(lambda x: process_element(x), tokens)
    nan_tokens=[]
    for i in tokens:
        nan_tokens+=i.split(' ')
    return nan_tokens
Example #25
	def raw(self, fileid):
		"""
			Returns raw text of fileid
			
			>>> hr.raw('1996/HAM2-960622.xml')[:38]
			'<?xml version="1.0" encoding="UTF-8"?>'
		"""		
		wordlists = PlaintextCorpusReader(self.hamshahri_root, fileid)
		return wordlists.raw(fileid)
Example #26
def read_BNC_baby_stem(root_local):
	global fdist
	BNC_baby = []
	stemmer = SnowballStemmer("english")
	wordlists = PlaintextCorpusReader(root_local, '.*', encoding='latin-1')
	for word in wordlists.words():
		BNC_baby.append(stemmer.stem(word))
	fdist = FreqDist(word.lower() for word in BNC_baby)
	return(fdist)
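A usage sketch (the corpus root is hypothetical):

fdist = read_BNC_baby_stem('/path/to/BNC_baby')
print(fdist.most_common(10))   # the ten most frequent lower-cased stems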
Example #27
def compare(request):
    errors = []
    statistics = []
    for x in range(1, 3):
        cantoname = "canto" + str(x) + ".txt"
        w = PlaintextCorpusReader("./", cantoname)
        l_lines = len(line_tokenize(w.raw()))
        l_uwords = len(set(w.words()))
        l_words = len(w.words())
        l_sents = len(w.sents())
        l_paras = len(w.paras())
        l_linperpara = l_lines / float(l_paras)
        statistics.append(x)
        statistics.append("Number of Words - " + str(l_words))
        statistics.append("Number of Unique Words - " + str(l_uwords))
        statistics.append("Number of Sentences - " + str(l_sents))
        statistics.append("Number of Lines - " + str(l_lines))
        statistics.append("Number of Paras - " + str(l_paras))
        statistics.append("Number of Lines/Paras - " + str(l_linperpara))
        lexical_density = l_words / float(l_uwords)
        l_wordpersent = l_words / float(l_sents)
        statistics.append("Lexical Density (Total/Uniq) words- " + str(lexical_density))
        statistics.append("Words per sentence - " + str(l_wordpersent))

    return render_to_response('compare.html', {'stats': statistics})
Example #28
def stats(request):
    errors = []
    statistics=[]
    if 'q' in request.GET:
        q = request.GET['q']
        if not q:
            errors.append('Enter a Canto Number')
        else:
           cantoname = "canto"+q+".txt"
           w=PlaintextCorpusReader("./",cantoname);
           w.words();
           t=nltk.text.Text(w.words());
           l_lines=len(line_tokenize(w.raw()))
           l_uwords=len(set(w.words()))
           l_words=len(w.words())
           l_sents=len(w.sents())
           l_paras=len(w.paras())
           l_linperpara=l_lines/l_paras
           statistics.append("Number of Words - "+ str(l_words))
           statistics.append("Number of Unique Words - "+ str(l_uwords))
           statistics.append("Number of Setences - "+ str(l_sents))
           statistics.append("Number of Lines - "+ str(l_lines))
           statistics.append("Number of Paras - "+ str(l_paras))
           statistics.append("Number of Lines/Paras - "+ str(l_linperpara))
           lexical_density=l_words/l_uwords
           l_wordpersent = l_words/l_sents
           statistics.append("Lexical Density (Total/Uniq) words- "+ str(lexical_density))
           statistics.append("Words per sentence - "+ str(l_wordpersent))
           return render_to_response('stats.html', {'statistics':statistics})
    return render_to_response('stats.html', {'errors': errors})
Example #29
def big_event_sentences():
	corpus_root = '../Brazil_NightClub_Fire/'

	wordlists = PlaintextCorpusReader(corpus_root, '.*\.txt')

	BigEvent = wordlists.raw()

	sent_tokenizer = nltk.data.load('../nltkData/tokenizers/punkt/english.pickle')

	BigEventSentences = sent_tokenizer.tokenize(BigEvent)
	return BigEventSentences
Example #30
def class_event_sentences():
	corpus_root = '../Islip13Rain/'

	wordlists = PlaintextCorpusReader(corpus_root, ".*\.txt")

	ClassEvent = wordlists.raw()

	sent_tokenizer = nltk.data.load('../nltkData/tokenizers/punkt/english.pickle')

	ClassEventSentences = sent_tokenizer.tokenize(ClassEvent)
	return ClassEventSentences
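The two loaders above differ only in their corpus root, so a parameterized helper is a natural refactor (a sketch reusing the same punkt pickle path):

def event_sentences(corpus_root):
	wordlists = PlaintextCorpusReader(corpus_root, '.*\.txt')
	sent_tokenizer = nltk.data.load('../nltkData/tokenizers/punkt/english.pickle')
	return sent_tokenizer.tokenize(wordlists.raw())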
Example #31
def dict_freq_mot_classe(classe, corpus):
    dictio = {}
    for dirs in os.walk(corpus):
        corpus_root = dirs[0]  # walk the directory tree under the corpus path
        if corpus_root != corpus:
            if os.path.basename(corpus_root) == classe:
                textlist = PlaintextCorpusReader(corpus_root, '.*')
                for files in textlist.fileids():
                    test = corpus_root + '/' + files
                    l = split(test)
                    d = freq_mot(l)
                    for mot in d:
                        if mot not in dictio:
                            dictio[mot] = 0
                        dictio[mot] = dictio[mot] + d[mot]

    return dictio
 def __init__(self, value, junk=junk_words, p=programming_skills):
     self.value = value
     self.junk_words = junk
     self.programming = p
     root = os.getcwd() + '/output/' + self.value
     files = self.value + '.*'
     text = PlaintextCorpusReader(root, files)
     self.corpus = text
Example #33
def prepData(setdir):
    cor = []
    senti = setdir[-3:]
    files = ".*.txt"
    corpustemp = PlaintextCorpusReader(setdir, files)
    corpus = nltk.Text(corpustemp.words())
    corpus = [x.lower() for x in corpus]  # eliminate counting the same word twice due to case sensitivity

    # stemming -> take the root stem of each word
    stemmer = PorterStemmer()
    stemmed_corpus = []
    for x in corpus:
        stemmed_corpus_elm = stemmer.stem(x)
        stemmed_corpus.append((stemmed_corpus_elm, senti))

    return stemmed_corpus
Example #34
def get_feature_set_tuples(feature_file):
    tuple_list = []
    path = feature_file.split('/')
    file_name = path[(-1):]
    root = ''
    for i in range(len(path)-1):
        root=root + path[i] + '/'
    feature_file = PlaintextCorpusReader(root, file_name)
    features = feature_file.words()
    for each in features:
        if(each[:2] == 'NN' or each[:2] =='VV'):
            tuple_list.append((each[2:], each[:2]))
        elif(each[:3] =='ADJ' or each[:3]=='ADV'):     
            tuple_list.append((each[3:], each[:3]))
        elif(each[:4]=='PREP'):
            tuple_list.append((each[4:], each[:4]))
    return tuple_list
Example #35
def Load_RawText_Collection(root):
    raw_text_dict = {}
    corpus_root = root
    wordlists = PlaintextCorpusReader(corpus_root, '.*\.txt')
    porter = nltk.PorterStemmer()
    stopwords = nltk.corpus.stopwords.words('english')
    for each_file in wordlists.fileids():
        content_list = []
        for each_sent in wordlists.sents(each_file):
            sent_list = []
            for w in each_sent:
                if len(w) > 2 and w.isalpha() and w not in stopwords:
                    sent_list.append(porter.stem(w.lower()))
            if len(sent_list) > 0:
                content_list.append(each_sent)
                raw_text_dict[each_file] = content_list
    return raw_text_dict
Example #36
 def my_bar(self, corpus, patt, n):
     wordlists = PlaintextCorpusReader(corpus, patt)
     fileids = wordlists.fileids()
     k = len(fileids)
     fig1 = pylab.figure(1)
     fig2 = pylab.figure(2)
     fig3 = pylab.figure(3)
     fig4 = pylab.figure(4)
     for id in fileids:
         i = fileids.index(id) + 1
         words = wordlists.words(id)
         fre = FreqDist(word.lower() for word in words if word.isalpha())
         self.bar_per(fre, n, fig1, 2 * k, 2 * i, id)
         self.bar_count(fre, n, fig2, 2 * k, 2 * i, id)
         self.bar_freq(fre, n, fig3, 2 * k, 2 * i, id)
     fig4 = self.plot_cfreq(corpus, patt, n)
     pylab.show()
Example #37
def getMarkovBatch():
    last_word_sentences = defaultdict(list)
    markov_lyrics.markov()
    corpus_root = '/Users/divyasingh/Documents/MABLE/Story_telling_lyrics/words/corpus'
    wordlists = PlaintextCorpusReader(corpus_root, '.*')
    mega_sentences = wordlists.sents('lyrics_batch.txt')
    if not mega_sentences:
        # regenerate the batch and retry instead of falling through with an unbound result
        return getMarkovBatch()
    for sentence in mega_sentences:
        lw = last_word(sentence)
        last_word_sentences[lw].append(sentence)
    keys = list(last_word_sentences.keys())
    return keys, last_word_sentences
Example #38
class TextCollection:
    def __init__(self):
        # Create a Corpus with all the data preprocessed with exercise 2 tokenizer
        self.corpus = PlaintextCorpusReader(corpusdir, '.*/*', word_tokenizer=preprocess)
        # Create the vectorial Space, creating each Vector
        self.Text_vectors = []
        for document in self.corpus.fileids():
            self.Text_vectors.append(TextVector(document))
Example #39
def get_coarse_level_features(dataset, output_file):
# accessing the corpus
    corpus_root = '/home1/c/cis530/data-hw2/' 
    dataset_path = corpus_root + dataset

# Reading the files from the directories
    files = PlaintextCorpusReader(dataset_path, '.*')
    ids = files.fileids()
    stopFile = PlaintextCorpusReader(corpus_root, 'stopwlist.txt')
    stops = stopFile.words()

#Opening a file that has to be written to
    out = open(output_file, 'w')

    for i in range(len(ids)):
#Initializing certain variables
        tokens_count=0
        types = 0
        non_stops_count=0
        sents_count = 0
        avg_sent_len=0
        cap_count = 0

        tokens=files.words(ids[i])
#Computing Number of Tokens
        tokens_count = len(tokens)

#Computing Number of types
        types = len(set(tokens))
        non_stops=[]

#Computing Number of Content Words
        for t in tokens:
            if t not in stops:
                non_stops.append(t)
        non_stops_count = len(non_stops)

#Finding Average Sentence Length
        sent = []
        sent = files.sents(ids[i])
        sents_count = len(sent)
        sent_len=0
        for s in sent:
            sent_len = sent_len + len(s)
        avg_sent_len = sent_len/float(sents_count)

#Computing Number of Captilized Words
        for c in non_stops:
            if c.istitle():
                cap_count = cap_count+1
        current_file = dataset + '/' + ids[i]
        e = current_file.split('/')
        out.write(current_file +' '+ e[-2] + ' tok:' + str(tokens_count) + ' typ:' + \
str(types) + ' con:' + str(non_stops_count) + ' sen:' + str(sents_count) + ' len:' + str(avg_sent_len) + ' cap:' + str(cap_count)+ '\n')
        out.flush()
Example #40
 def my_count(self, corpus, patt, n, filename):
     wordlists = PlaintextCorpusReader(corpus, patt)
     fileids = wordlists.fileids()
     res = []
     for id in fileids:
         words = wordlists.words(id)
         leng = len(words)
         wordc = len(set(words))
         wor = "=> num corpus words: " + str(leng)
         dis = "=> num distinct words: " + str(wordc)
         ric = "=> ind lex richness: " + str(leng / wordc)
         res.append(dis)
         res.append(ric)
         res.append(wor)
         for word in sorted(set(words)):
             freq = words.count(word)
             f = "(" + word.lower() + "," + str(round(100 * freq / float(leng), 1)) + ")"
             t = "(" + word.lower() + "," + str(freq) + "/" + str(leng) + ")"
             res.append(f)
             res.append(t)
     out = open("/home/camilo/" + filename, "w")
     try:
         for t in res[:n]:
             out.write(t + "\n")
     finally:
         out.close()
Example #41
def test_corpus_reader():
    본문 = """세종(世宗, 1397년 5월 7일 (음력 4월 10일) ~ 1450년 3월 30일(음력 2월 17일), 재위 : 1418년 ~ 1450년)은 조선의 제4대 국왕이며 언어학자이다. 그의 업적에 대한 존경의 의미를 담은 명칭인 세종대왕(世宗大王)으로 자주 일컬어진다.
성은 이(李), 휘는 도(祹), 본관은 전주(全州), 자는 원정(元正), 아명은 막동(莫同)이었다. 묘호는 세종(世宗)이며, 시호는 영문예무인성명효대왕(英文睿武仁聖明孝大王)이고, 명나라에서 받은 시호는 장헌(莊憲)이었다. 존시를 합치면 세종장헌영문예무인성명효대왕(世宗莊憲英文睿武仁聖明孝大王)이다."""

    # create a temporary file
    with tempfile.NamedTemporaryFile(mode='w+', encoding='utf8') as fp:
        fp.write(본문)
        fp.seek(0)
        파일경로 = fp.name
        폴더 = os.path.dirname(파일경로)
        파일명 = os.path.basename(파일경로)
        # create the corpus reader
        reader = PlaintextCorpusReader(root=폴더,
                                       fileids=[파일명],
                                       word_tokenizer=형태소_분석기)
        # check the morpheme segmentation
        분석결과 = reader.words()
        assert 분석결과
Example #42
def main():
    options = get_options()
    if not os.path.isdir(options.output):
        os.makedirs(options.output)

    wordlists = PlaintextCorpusReader(options.input, '.*\.txt$')

    stopwords = list()
    if options.stopwords:
        stopwords = get_stopwords(options.stopwords)

    for fileid in wordlists.fileids():
        results = analyze_text(wordlists.words(fileid), fileid, stopwords,
                               options.min_length, options.min_freq,
                               options.total_ngrams, options.min_measure,
                               options.bigrams_only, options.trigrams_only)
        write_results(results, os.path.join(options.output, fileid),
                      options.bigrams_only, options.trigrams_only)
Example #43
 def occStats(self,path,format,list,plotting):
     wordlists = PlaintextCorpusReader(path,format)
     fileids = wordlists.fileids()
     k = len(fileids)
     
     # computing rel frequencies
     self.fileStats(path,fileids)
     
     # plotting vars
     figname = "GQs by class"
     figpath = plotting +'/'+ figname.replace(' ', '-') + '-stats.pdf'
     savpath = plotting +'/'+ figname.replace(' ', '-')
     
     # plotting
     MyPlot(self.stats,self.classstats,figname, "three",plotting,list) # per class (no regression)
     
     # generating report
     SaveStats(self.classstats,self.stats,figpath,savpath,plotting) # per class
Example #44
class Song(TextCollection):
    def __init__(self, fileid, title=None):
        self.title = title
        self.fileid = fileid
        self.corpus = PlaintextCorpusReader('.',
                                            fileid,
                                            word_tokenizer=tokenizer)
        super().__init__(self.corpus.words())

    def __repr__(self):
        title = self.corpus.sents()[0]
        return "<Song " + ' '.join(title) + ">"

    def __eq__(self, other):
        return self.fileid == other.fileid

    def __hash__(self):
        return hash(self.fileid)
Example #45
 def load_corpus(self, corpus_root, regex='.*', encoding='utf8'):
     '''
     Return a corpus of your own.
     corpus_root: the directory that holds the corpus files
     regex: file-matching pattern; by default every file in the directory is loaded
     '''
     from nltk.corpus import PlaintextCorpusReader
     wordlists = PlaintextCorpusReader(corpus_root, regex, encoding=encoding)
     return wordlists
Example #46
    def create_model(self, corpus_name):
        print('reading corpus')
        reader = PlaintextCorpusReader(CORPUS_DIR, corpus_name)

        print('padded everygram pipeline')
        train_data, vocab = padded_everygram_pipeline(self.ngram,
                                                      (reader.sents()))

        print('creating model')
        # Create the model
        model = MLE(self.ngram)

        print('fitting data')
        # Fit the model to the data
        model.fit(train_data, vocab)
        print('fitted')

        return model
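Once fit, the nltk.lm MLE model can score and generate tokens; a usage sketch (the wrapper instance and corpus name are hypothetical):

model = builder.create_model('lyrics.txt')
print(model.generate(10, random_seed=42))   # sample 10 tokens from the fitted model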
Example #47
def occurrence_mot_i_corpus(mot, corpus):
    compteur = 0
    for dirs in os.walk(corpus):
        corpus_root = dirs[0]  # walk the directory tree under the corpus path
        if corpus_root != corpus:
            textlist = PlaintextCorpusReader(corpus_root, '.*')
            for files in textlist.fileids():
                test = corpus_root + '/' + files
                x = open(test, 'r')
                for ligne in x:
                    lign = ligne.split()
                    for mt in lign:
                        mt = mot_mot(alphabet, mt)
                        if mt == mot:
                            compteur += 1
                x.close()
    return compteur
Example #48
    def occStats(self,path,format,list,plotting):
        wordlists = PlaintextCorpusReader(path,format)
        fileids = wordlists.fileids()
        k = len(fileids)

        # computing rel frequencies
        self.fileStatsCum(path,fileids)

        # plotting vars (2)
        fignameCum = "Base GQs"
        figpathCum = plotting +'/'+ fignameCum.replace(' ', '-') + '-stats.pdf'
        savpathCum = plotting +'/'+ fignameCum.replace(' ', '-')

        # plotting
        MyPlot(self.statsCum,self.classstatsCum,fignameCum, "three",plotting,list) # cum

        # generating report
        SaveStats(self.classstatsCum,self.statsCum,figpathCum,savpathCum,plotting) # cum
def get_pos_features(dataset,feature_set_file,output_file):
	# Import the corpus reader
	corpus_root = '/home1/c/cis530/data-hw2/'+dataset
	# Define the folder where the files are situated
	files_dataset = PlaintextCorpusReader(corpus_root, '.*')
	feature_list = open('/home1/c/cis530/data-hw2/'+feature_set_file).read().split()
	output = open('/home1/c/cis530/data-hw2/'+output_file,'w')
	NOUNS=['NN','NNS','NP','NPS']
	VERBS=['VB','VBD','VBG','VBN','VBP','VBZ']
	ADJECTIVE=['JJ','JJR','JJS']
	ADVERB=['RB','RBR','RBS']
	PREPOSITION=['IN']
	for fileid in files_dataset.fileids():
		# Output the docid
		output.write(dataset+'/'+fileid+' ')
		# Output the topic_name
		topic_name=fileid.split('/')[0]
		output.write(topic_name+' ')
		pos_tag_tuple = nltk.pos_tag(files_dataset.words(fileid))
		# convert the tuple to list so that it can be changed
		pos_tag_list=[list(pos_tag_tuple[i]) for i in range(len(pos_tag_tuple))]
		# retag the POS and replace the word with tag_word
		for w in pos_tag_list:
			if w[1] in NOUNS:
				w[0]='NN'+w[0]
				w[1]='NN'
			elif w[1] in VERBS:
				w[0]='VV'+w[0]
				w[1]='VV'
			elif w[1] in ADJECTIVE:
				w[0]='ADJ'+w[0]
				w[1]='ADJ'
			elif w[1] in ADVERB:
				w[0]='ADV'+w[0]
				w[1]='ADV'
			elif w[1] in PREPOSITION:
				w[0]='PREP'+w[0]
				w[1]='PREP'
		tw_list=[w[0] for w in pos_tag_list]		
		fd=FreqDist(tw_list)
		for tw in feature_list:
			output.write(tw+':'+str(fd[tw])+' ')
		output.write('\n')
	output.close()
Example #50
def get_tfidf_featureset(
    corpus_root="/Users/bryceanderson/Desktop/brosse/TwitterStuff",
    idf_range=(.3,.7), rebuild=False):

    if not rebuild and os.path.isfile("tfidf.pickle"):
        print("* Returning pickled feature set *")
        with open("tfidf.pickle","rb") as f:
            return pickle.load(f)

    DemCorpus = PlaintextCorpusReader("./Democrat",".*\.txt")
    RepCorpus = PlaintextCorpusReader("./Republican",".*\.txt")
    FullCorpus = PlaintextCorpusReader(".", "(Republican|Democrat)/.*\.txt")

    print("processing dems...")
    demStems = classifier.get_stems_dict(DemCorpus,True)
    print("processing reps...")
    repStems = classifier.get_stems_dict(RepCorpus,True)

    print("computing IDFs...")
    fullStems = demStems.copy()
    fullStems.update(repStems)
    stemSets = { f:set(sl) for f,sl in fullStems.items() }
    listOfAllStems = list(set.union(*stemSets.values()))

    idfs = pd.Series({ stem: 
        sum([ stem in stem_list for _,stem_list in stemSets.items() ]) 
        for stem in listOfAllStems }) / len(FullCorpus.fileids())
    idfs = idfs[idfs.between(*idf_range)]
    #print(len(idfs))
    #idfs.sort_values(ascending=False, inplace=True)
    #idfspoints = idfs[0:100]
    #idfspoints.plot(kind='bar', figsize=(30,30), fontsize=28)
    print("computing TFs...")
    featureset = []
    for demStemDict in demStems.values():
        featureset.append((compute_tfidf(demStemDict, idfs), 'D'))
    for repStemDict in repStems.values():
        featureset.append((compute_tfidf(repStemDict, idfs), 'R'))

    print("* Pickling feature set *")
    with open("tfidf.pickle","wb") as f:
        pickle.dump((featureset,idfs),f)

    return (featureset, idfs)
Example #51
def get_CIQ_wordVector():
    wordlists = PlaintextCorpusReader(root_path + "实验文件", fileids='.*\.tsv')
    words = wordlists.words("CIQ_traindata_contents.tsv")
    model = gensim.models.KeyedVectors.load_word2vec_format(
        root_path + 'GoogleNews-vectors-negative300.bin', binary=True)

    vocabulary = model.wv.vocab.keys()
    file_object = open(root_path + "实验文件/CIQ_wordVector.tsv",
                       encoding='UTF-8',
                       mode='w')
    file_object.write(f"{len(vocabulary)}\t{300}\n")
    for w in words:
        if len(w) != 20 and w in vocabulary:
            vector_list = model.get_vector(w).tolist()
            file_object.write(w + "\t")
            for index in range(len(vector_list) - 1):
                file_object.write(str(vector_list[index]) + ",")
            file_object.write(str(vector_list[len(vector_list) - 1]) + "\n")
    file_object.close()
Example #52
    def __init__(self, album_file):
        """
        if the album info is given by `lyrics/kendrick/damn.json
        then the corpus path is given by `lyrics/kendrick/damn/`
        """

        # split into ('lyrics/kendrick/damn', '.json')
        root, ext = os.path.splitext(album_file)
        root += '/'
        self.artist, self.title, song_titles = genius.parse_album_file(
            album_file)
        self.corpus = PlaintextCorpusReader(root,
                                            r'.*\.txt',
                                            word_tokenizer=tokenizer)
        super().__init__(self.corpus.words())
        self.tracks = [
            Song(root + fileid, title=title)
            for title, fileid in zip(song_titles, self.corpus.fileids())
        ]
Example #53
 def generate_doc_list(self):
     print('generate_doc_list!')
     reader = Reader(self.path, '.*\.txt')  # 'reader', not 'all': don't shadow the built-in
     for id in reader.fileids():
         if re.match(FactFeel.file_path_expression, id):
             if re.match(FactFeel.file_expression, id):
                 if re.match(FactFeel.fact_expression, id):
                     #print('fact :{}'.format(id))
                     self.facts.append(id)
                 elif re.match(FactFeel.feel_expression, id):
                     #print('feel :{}'.format(id))
                     self.feels.append(id)
     print('fs:{}'.format(reader.fileids()))
     print('feels{}'.format(self.feels))
     print('facts{}'.format(self.facts))
     self.doclist[FactFeel.FACT] = self.facts
     self.doclist[FactFeel.FEEL] = self.feels
     with open(self.doc_list_path, 'w+') as outfile:
         json.dump(self.doclist, outfile)
Example #54
def read_text(path):
    if os.path.isdir(path):
        raw = PlaintextCorpusReader(path, '.*').raw()
    else:
        with open(path) as f:  # close the file handle when done
            raw = f.read()
    tokens = nltk.tokenize.word_tokenize(raw)

    return Text(tokens)
Example #55
def occurrence_mot_i_classe_stem(mot, classe, corpus):
    compteur = 0
    for dirs in os.walk(corpus):
        corpus_root = dirs[0]  # walk the directory tree under the corpus path
        if os.path.basename(corpus_root) == classe:
            textlist = PlaintextCorpusReader(corpus_root, '.*')
            for files in textlist.fileids():
                test = corpus_root + '/' + files
                x = open(test, 'r')
                for ligne in x:
                    lign = ligne.split()
                    for mt in lign:
                        mt = mot_mot(alphabet, mt)
                        mt = SnowballStemmer("english").stem(mt)
                        if mt == mot:
                            compteur += 1
                x.close()
    return compteur
Example #56
def extractParasInList(name):
    corpuslocation = '/home/aniszaman/seniorProject/combined/carnivore'
    reader = PlaintextCorpusReader(corpuslocation, '.*\.txt')
    # reader.paras() returns a list of paragraphs; each paragraph is a list of
    # sentences, and each sentence is a list of words, so it is a list of
    # lists of lists.
    pList = []
    paragraphlist = reader.paras(name)  # e.g. 'simpleTuring.txt'
    for sentlist in paragraphlist:
        # flatten the paragraph's sentences into a single word list
        paraAsAList = []
        for sent in sentlist:
            paraAsAList = paraAsAList + sent
        # join the words back into one string per paragraph
        paraAsAString = " ".join(paraAsAList)
        pList.append(paraAsAString)
    return pList
Example #57
def diccionario_bigramPalabras():
    # Read and tokenize the corpus
    corpus = PlaintextCorpusReader("Corpus", '.*')
    tokenizer = RegexpTokenizer(r'[a-zA-Záéíóúñ]+')
    tokens = tokenizer.tokenize(corpus.raw())
    
    # Build a dictionary of word bigrams plus their frequency
    bigrams_orig = bigrams(tokens)
    fdist = FreqDist(bigrams_orig)
    dict_bigrams = {}
    for b in fdist:
        b_tr = (b[0], traducciones.traduce_numerico(b[1]))
        try:
            if dict_bigrams[b_tr][1] < fdist.get(b):
                dict_bigrams[b_tr] = [b, fdist.get(b)]
        except KeyError:
            dict_bigrams[b_tr] = [b, fdist.get(b)]

    return dict_bigrams
Example #58
def get_sentences_for_text(corpus_root, filename, lang='english'):
    """Segments the given text into sentences.

  Args:
    corpus_root: Directory in which the text file is residing.
    filename: Name of the text file.
    lang: Tokenizer language. For possible values, look at:
    ${NLTK_DATA}/tokenizers/punkt

  Returns:
    Sentences in the given text. 

  """
    tokenizer_path = 'tokenizers/punkt/' + lang + '.pickle'
    text = PlaintextCorpusReader(
        corpus_root, [filename],
        word_tokenizer=WhitespaceTokenizer(),
        sent_tokenizer=nltk.data.LazyLoader(tokenizer_path))
    return text.sents()
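A usage sketch (both paths are hypothetical):

sents = get_sentences_for_text('/data/texts', 'article.txt')
print(sents[0])   # the first sentence, as a list of whitespace-delimited tokens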
Example #59
def textmetrics():

    local = input(
        u'Enter the path to the folder that holds your text file: ')

    corpus_root = (str(local))

    wordlists = PlaintextCorpusReader(corpus_root, '.*')

    nomes = wordlists.fileids()

    print("")
    print("Text files found:")
    print(nomes)
    print("")

    escolha = input("Choose the file you want to analyze: ")

    print("")
    u = open(local + '\\' + escolha, 'rb').read().decode('latin-1')

    t = nltk.word_tokenize(u)
    tokens = len(t)
    tokensunicos = len(set(t))
    diver = tokens / tokensunicos
    print(u"Number of tokens in the text: " + str(tokens))
    print(u"Number of unique tokens in the text: " + str(tokensunicos))
    print(u"Lexical diversity: " + str(float(diver)))
    palavras = []
    for i in range(len(t)):
        if t[i].isalpha():
            trans = t[i].lower()
            if len(trans) > 1 and trans not in stopwords.words('portuguese'):
                palavras.append(trans)

    fdist1 = FreqDist(palavras)
    vocab = (fdist1.items())
    vocab1 = sorted([(tpl[1], tpl) for tpl in vocab], reverse=True)
    vocab2 = OrderedDict([tpl[1] for tpl in vocab1])
    print('List of the 50 most frequent words: ')
    print(list(vocab2)[:50])

    fdist1.plot(50, title=u'Frequency distribution (50 most common):')
Example #60
 def __init__(self, files):
     Corpus.corpus = PlaintextCorpusReader('./data/speeches', files)
     Corpus.speech = Speech(self.corpus.raw(), self.corpus.words(),
                            self.corpus.sents(), self.corpus.paras(), None,
                            None, None, None)
     Corpus.speeches = None
     Corpus.years = [
         int(year.split('.')[0]) for year in self.corpus.fileids()
     ]
     Corpus.complementary = None