def bet():
    """Flask view: fill the grammar .docx mail-merge template with a word
    and its Google definition, then send the generated file for download.

    Reads the word from ``request.form["input"]``.
    Returns the generated ``gramarian.docx`` as an attachment.
    """
    # Fix: build paths with os.path.join instead of string concatenation,
    # and compute the working directory once.
    cwd = os.getcwd()
    template = os.path.join(cwd, "grammartemp.docx")
    document = MailMerge(template)
    print(request.form)  # debug: inspect the submitted form
    word = request.form["input"]
    # NOTE(review): googlemeaning is called on the PyDictionary class, not
    # an instance — confirm this matches the installed PyDictionary version.
    meaning_lines = str(PyDictionary.googlemeaning(word)).split('\n')
    # Expected format: line 0 is "<word>: <part of speech>",
    # line 1 starts with the definition sentence.
    document.merge(
        WORD=word.upper(),
        part=meaning_lines[0].split(": ")[1].rstrip(),
        defo=meaning_lines[1].split(".")[0],
    )
    out_dir = os.path.join(cwd, "files")
    document.write(os.path.join(out_dir, "gramarian.docx"))
    return send_from_directory(directory=out_dir, filename='gramarian.docx',
                               as_attachment=True)
def dictionary(self):
    """Prompt the user for a word and print its Google meaning, synonyms
    and antonyms."""
    lookup = PyDictionary()
    print_say('\nEnter word', self)
    word = input()
    print('\nMeaning : ' + str(lookup.googlemeaning(word)))
    blockPrint()  # silence PyDictionary's own console output
    synonyms = lookup.synonym(word)
    antonyms = lookup.antonym(word)
    if synonyms is not None:
        synonyms = [entry.encode('UTF8') for entry in synonyms]
    if antonyms is not None:
        antonyms = [entry.encode('UTF8') for entry in antonyms]
    enablePrint()
    print('\nSynonyms : ' + str(synonyms))
    print('\nAntonyms : ' + str(antonyms))
def pieceOfLyric(self, video, numWord):
    """Return a snippet of relevant lyric words for the MusixMatch API.

    The MusixMatch API only needs a piece of the lyric containing relevant
    words, so this scans the OCR'd text, skips the first 100 characters,
    and collects distinct words of more than two letters that Google can
    define.

    Bug fixed: the original tested ``word not in listLyric`` — a substring
    check on the accumulated string — so e.g. "cat" was wrongly skipped
    once "catalog" had been added. Word membership is now tracked in a set.
    """
    lyric = self.scanOCR(video)
    words = lyric[100:].split()
    total = len(words)
    dictionary = PyDictionary()
    picked = []
    seen = set()
    count = 0
    idx = 0
    # NOTE(review): `count <= numWord` admits numWord + 1 words; kept as-is
    # to preserve the original output size — confirm intended.
    while count <= numWord and idx < total:
        candidate = words[idx]
        # Only words longer than two letters are relevant enough.
        if len(candidate) > 2 and candidate not in seen:
            # Keep the word only if Google knows a meaning for it.
            if dictionary.googlemeaning(candidate):
                seen.add(candidate)
                picked.append(candidate)
                count += 1
        idx += 1
    # Preserve the original output format: a leading space before each word.
    return "".join(" " + w for w in picked)
def pieceOfLyric(self, video, numWord):
    """Extract a piece of the OCR'd lyric with relevant words.

    The MusixMatch API only needs a chunk of lyric containing relevant
    words, so this keeps words of more than two letters that Google can
    define, starting 100 characters into the scanned text.
    """
    full_text = self.scanOCR(video)
    candidates = full_text[100:].split()
    result = ""
    accepted = 0
    position = 0
    looker = PyDictionary()
    while accepted <= numWord and position < len(candidates):
        token = candidates[position]
        # Keep words of more than two letters that are not already present
        # in the accumulated snippet and that Google knows a meaning for.
        if (len(token) > 2 and token not in result
                and looker.googlemeaning(token)):
            result += " " + token
            accepted += 1
        position += 1
    return result
# Interactive word lookup: prints dictionary data for each entered word and
# speaks the Google meaning aloud. Python 2 script (raw_input, pyttsx).
from PyDictionary import PyDictionary
dictionary = PyDictionary()
import pyttsx
engine = pyttsx.init()
engine.setProperty('rate', 150)  # speech rate in words per minute

variable = ""
while True:
    # Note: the word "quit" itself is still looked up once before exiting,
    # exactly as in the original `while variable != 'quit'` loop.
    if variable == 'quit':
        break
    variable = raw_input('Lookup a word, type something in: ')
    meaning = dictionary.meaning(variable)
    synonym = dictionary.synonym(variable)
    antonym = dictionary.antonym(variable)
    translate = dictionary.translate(variable, 'es')
    google = dictionary.googlemeaning(variable)
    print("meaning :", meaning)
    print('\n')
    print("synonym :", synonym)
    print('\n')
    print("antonym :", antonym)
    print('\n')
    print("translated to spanish :", translate)
    print('\n')
    print("google meaning: ", google)
    engine.say('google meaning is ')
    engine.say(google)
    engine.runAndWait()
# Take the five most frequent terms and define the first one, preferring
# a Wikipedia summary, then a Google meaning, then an emoji apology.
most_occur = sample.most_common(11)

# Assign the five most common (term, count) pairs to named variables.
# Fix: simple slice instead of an index comprehension with a stray comma.
termOne, termTwo, termThree, termFour, termFive = most_occur[:5]
termOne = str(termOne)


def strip_chars(text, chars):
    """Return *text* with every character found in *chars* removed."""
    # Fix: parameter renamed from `str`, which shadowed the builtin.
    return "".join(c for c in text if c not in chars)


# term_one_done is stripped of symbols and digits, ready for a wiki query.
term_one_done = strip_chars(termOne, "(),'0123456789[]")

# Print first term.
# Fix: bare `except:` narrowed to `except Exception` so KeyboardInterrupt
# and SystemExit are no longer swallowed.
try:
    term_one_defined = wikipedia.summary(term_one_done, sentences=2)
except Exception:
    try:
        term_one_defined = dictionary.googlemeaning(term_one_done)
    except Exception:
        term_one_defined = emoji.emojize(
            ':angry_face: Sorry, this term is too vague to define')
print(term_one_done)
print(term_one_defined)
dictionary=PyDictionary() r = sr.Recognizer() m = sr.Microphone() print("A moment of silence, please...") with m as source: r.adjust_for_ambient_noise(source) print("Set minimum energy threshold to {}".format(r.energy_threshold)) print("Say The Word....") with m as source: audio = r.listen(source) print("Got it! Now to recognize it...") try: # recognize speech using Google Speech Recognition value = r.recognize_google(audio) word=str(value) print word word=word.replace(' ','') print word except sr.UnknownValueError: print("Oops! Didn't catch that") except sr.RequestError as e: print("Plese Check your internet Connection") if(type(None)!=type(dictionary.googlemeaning(word))): print('\n\n\n'+dictionary.googlemeaning(word)) else: print "Even Google Doesn't Know Its Meaning ...!!!!!!!"
# Turn the fifth term into a string and strip noise characters.
termFive = str(termFive)


def strip_chars(text, chars):
    """Return *text* with every character found in *chars* removed."""
    # Fix: parameter renamed from `str`, which shadowed the builtin.
    return "".join(c for c in text if c not in chars)


# termFiveDone is stripped of symbols and digits, ready for a wiki query.
termFiveDone = strip_chars(termFive, "(),'0123456789[]")

# Print first term: Wikipedia summary, falling back to Google meaning.
# Fix: bare `except:` narrowed to `except Exception`.
print(termOneDone)
try:
    print(wikipedia.summary(termOneDone, sentences=2))
except Exception:
    try:
        print(dictionary.googlemeaning(termOneDone))
    except Exception:
        print(emoji.emojize(
            ':angry_face: Sorry, this term is too vague to define'))
print("")

# Print second term.
# NOTE(review): this branch uses dictionary.meaning() while the first term
# uses googlemeaning() — looks inconsistent; confirm which is intended.
print(termTwoDone)
try:
    print(wikipedia.summary(termTwoDone, sentences=2))
except Exception:
    try:
        print(dictionary.meaning(termTwoDone))
        # print(dictionary.googlemeaning(termTwoDone))
    except Exception:
        print(emoji.emojize(
            ':angry_face: Sorry, this term is too vague to define'))
print("")
def add(request):
    """Django view: fetch the article at ``?link=``, find its most frequent
    capitalized terms, define the top three (Wikipedia summary first, then
    Google meaning, then an apologetic emoji) and render the results page.

    Fixes: ``warnings.catch_warnings()`` was called twice without ``with``
    (a no-op); ``strip_chars`` was defined three times; the unused
    ``features`` variable and duplicated setup are removed; the three
    copy-pasted try/except definition chains are folded into one helper;
    bare ``except:`` is narrowed to ``except Exception``.
    """
    dictionary = PyDictionary()
    # simplefilter alone does the silencing; catch_warnings() without a
    # `with` block had no effect.
    warnings.simplefilter("ignore")

    weblink = request.GET['link']
    page = requests.get(weblink)
    soup = BeautifulSoup(page.content, 'lxml')
    good_content = soup.find('body')
    tit = soup.title.string

    # Strip all non-text content, then keep capitalized words over 3 chars.
    just_the_text = good_content.get_text().strip()
    cap_words = [word for word in just_the_text.split()
                 if word.istitle() and len(word) > 3]

    # Count occurrences and keep the most common terms
    # (Counter over the word list replaces the join/split round-trip).
    sample = Counter(cap_words)
    most_occur = sample.most_common(11)
    termOne, termTwo, termThree, termFour, termFive = most_occur[:5]

    def strip_chars(text, chars):
        """Return *text* with every character found in *chars* removed."""
        return "".join(c for c in text if c not in chars)

    def define(term):
        """Best-effort definition: Wikipedia, then Google, then an apology."""
        try:
            return wikipedia.summary(term, sentences=3)
        except Exception:
            try:
                return dictionary.googlemeaning(term)
            except Exception:
                return emoji.emojize(
                    ':angry_face: Sorry, this term is too vague to define')

    # Strip tuple punctuation and digits so terms are usable as queries.
    noise = "(),'0123456789[]"
    term_one_done = strip_chars(str(termOne), noise)
    term_two_done = strip_chars(str(termTwo), noise)
    term_three_done = strip_chars(str(termThree), noise)

    return render(
        request, 'DEMOAPP/results.html', {
            'article_title': tit,
            'term_1': term_one_done,
            'term_1_def': define(term_one_done),
            'term_2': term_two_done,
            'term_2_def': define(term_two_done),
            'term_3': term_three_done,
            'term_3_def': define(term_three_done),
            'url': weblink,
        })