def checkStopWordGram(extractToken):
    """Remove stop words from *extractToken* in place and return it.

    A word is a stop word when ``functionPython.findWordFromList`` returns
    the string ``"True"`` for it (the helper returns "True"/"False" strings,
    not booleans — see the ``==`` comparisons throughout this file).

    Args:
        extractToken: list of word tokens; mutated in place.

    Returns:
        The same list object, with stop words removed.
    """
    # BUG FIX: the original iterated `extractToken` directly while calling
    # .remove() on it. Removing an element shifts the iterator past the next
    # element, so consecutive stop words were skipped. Iterating over a
    # snapshot (list copy) visits every original token exactly once.
    for wordExtract in list(extractToken):
        checkWord = functionPython.findWordFromList(listOfStopWord, wordExtract)
        if checkWord == "True":
            extractToken.remove(wordExtract)
    return extractToken
def checkStopWordGram(extractToken):
    """Return a new list holding only the tokens that are not stop words.

    ``functionPython.findWordFromList`` reports membership in
    ``listOfStopWord`` as the string ``"True"``/``"False"``; a token is kept
    only when the answer is ``"False"``. The input list is left untouched.

    NOTE(review): this redefines ``checkStopWordGram`` — if both versions
    live in the same module, this later definition shadows the earlier one.
    """
    return [
        token
        for token in extractToken
        if functionPython.findWordFromList(listOfStopWord, token) == "False"
    ]
# NOTE(review): this physical line is a whitespace-mangled paste — a multi-line
# script fragment collapsed onto one line. It starts with an `else:` whose
# enclosing loop lies outside this chunk, and the first inline `#print(...)`
# turns the remainder of the physical line into comment text, so most tokens
# here are currently dead. It appears to assemble sentences, tokenize each one
# with t.bn_word_tokenizer, and tally per-word counts into wordToCount for
# words found in listOfTotalWord — TODO: restore the original line breaks and
# indentation before running or editing this code.
else: sentence = sentence + data[j] #print(listOfSentence) listOfTotalSentence.append(listOfSentence) listOfSentence = [] print(listOfTotalSentence) wordToCount = {} checkWord = "" for i in range(0, len(listOfTotalSentence)): extractToken = t.bn_word_tokenizer(listOfTotalSentence[i][0]) #print(listOfTotalSentence[i]) for word in extractToken: checkWord = functionPython.findWordFromList(listOfTotalWord, word) if checkWord == "True": checkWord = "" if word not in wordToCount.keys(): wordToCount[word] = 1 else: wordToCount[word] += 1 print(wordToCount) #print(wordToCount.items()) wordToList = [] for key, value in wordToCount.items(): temp = [key,value]
# NOTE(review): another whitespace-mangled paste of the same counting routine —
# it begins with `break else:` belonging to a loop outside this chunk, and the
# first inline `#print(...)` comments out the rest of the physical line. This
# variant differs from the fragment above in that it checks words against
# listOfStopWord and counts a word only when the helper answers "False"
# (i.e. it counts non-stop-words), then flattens wordToCount into wordToList
# as [key, value] pairs — TODO: restore the original line breaks and
# indentation before running or editing; consider unifying with the near-
# duplicate fragment above.
break else: sentence = sentence + data[j] #print(listOfSentence) listOfTotalSentence.append(listOfSentence) listOfSentence = [] print(listOfTotalSentence) wordToCount = {} checkWord = "" for i in range(0, len(listOfTotalSentence)): extractToken = t.bn_word_tokenizer(listOfTotalSentence[i][0]) #print(listOfTotalSentence[i]) for word in extractToken: checkWord = functionPython.findWordFromList(listOfStopWord, word) if checkWord == "False": checkWord = "" if word not in wordToCount.keys(): wordToCount[word] = 1 else: wordToCount[word] += 1 print(wordToCount) #print(wordToCount.items()) wordToList = [] for key, value in wordToCount.items(): temp = [key, value] wordToList.append(temp)