def dictionary(command):
    """Look up the last word of *command* according to its first word.

    The first word selects the operation ("define", "synonyms" or
    "antonyms"); the last word is the term looked up. Returns the result
    string, a retry prompt for unknown operations, or a "no results"
    message when PyDictionary finds nothing.
    """
    lookup = PyDictionary()
    parts = command.split()
    choice = parts[0]
    word = str(parts[-1])
    print(choice)
    print(word)
    try:
        if choice == "define":
            return str(lookup.meaning(word))
        if choice == "synonyms":
            joined = ', '.join(lookup.synonym(word))
            print(joined)
            return joined
        if choice == "antonyms":
            joined = ', '.join(lookup.antonym(word))
            print(joined)
            return joined
        return "Please retry your question"
    except TypeError:
        # PyDictionary returns None on a miss; join(None) raises TypeError.
        return ("Your word had no " + choice)
def DictExpandQuery(q_terms, k=5):
    """Expand query terms with up to *k* dictionary synonyms per term.

    Stop words are skipped. A term is only expanded when PyDictionary can
    resolve it; lookup failures leave the term unexpanded. Returns the
    original terms followed by all gathered synonyms.
    """
    dic = PyDictionary()
    new_terms = []
    for term in q_terms:
        if isStopWord(term):
            continue
        # Check whether the word exists in the dictionary at all.
        w_found = True
        try:
            dic.meaning(term)
        except Exception:  # FIX: narrowed from bare except (was masking SystemExit etc.)
            w_found = False
        if not w_found:
            continue
        # Take the first k synonyms, if any were returned.
        try:
            synonyms = dic.synonym(term)
        except Exception:  # FIX: narrowed from bare except
            continue
        if synonyms is None:  # FIX: identity check instead of "== None"
            continue
        new_terms.extend(synonyms[:k])  # slicing is safe even when len(synonyms) <= k
    return q_terms + new_terms
def game():
    """Play one round of the synonym-guessing game: up to three clues.

    Uses module globals `attempts`/`wins` as the running score and the
    externally defined create_word_list()/restart() helpers.
    """
    words = create_word_list()
    lookup = PyDictionary()
    global attempts, wins
    idx = 0
    answer = random.choice(words)
    while idx < 3:
        clue = lookup.synonym(answer)[idx]
        deadline = time.time() + 10  # 10-second budget for this clue
        print('\nClue: ' + clue)
        guess = input('Guess: ').lower()
        if guess == answer or guess + 's' == answer or guess == answer[:-3]:
            print("\nCorrect!")
            wins += 1
            break
        elif time.time() > deadline:
            # BUG FIX: the original compared the pre-input timestamp against
            # the deadline ("now > future"), which could never be true, so
            # the timeout branch was unreachable. Re-read the clock after
            # input() returns.  (Note: the timeout is still only detected
            # after the player answers — input() itself cannot be interrupted.)
            print("You ran out of time! The answer was %s." % answer)
            break
        else:
            print("\nWrong.")
            idx += 1
    if idx == 3:
        print("\nThe answer was %s." % answer)
    attempts += 1
    print("Game over. Your score was %d / %d." % (wins, attempts))
    print('-' * 10)
    words.remove(answer)
    restart()
def process_dictionary(word):
    # Build a one-line human-readable summary of the definitions of *word*.
    # NOTE(review): Python 2 only — `dict.keys()[0]` / `dict.values()[0]`
    # index the py2 list views; under py3 these calls raise TypeError.
    meaning = "You searched for the word {}. "
    # NOTE(review): PyDictionary is constructed with the word and queried via
    # getMeanings(); presumably it returns {word: {part_of_speech: [defs]}} —
    # TODO confirm against the PyDictionary API.
    dictionary = PyDictionary(word)
    our_meaning = dictionary.getMeanings()
    # Insert the looked-up word (first/only key) into the message template.
    meaning = meaning.format(our_meaning.keys()[0])
    # Pair each part of speech with entries of the first definition list.
    l = zip(our_meaning.values()[0].keys(),our_meaning.values()[0].values()[0])
    for idx in l:
        meaning += idx[0] + ":" + idx[1] + ", "
    # Trim the trailing space only (NOTE(review): the trailing comma remains —
    # possibly [:-2] was intended).
    return meaning[:-1]
def antonym(word):
    """Print a numbered list of antonyms for *word* (Python 2 snippet).

    PyDictionary returns None when nothing is found, which makes the
    iteration below raise TypeError; in that case prompt for a corrected
    spelling and retry recursively.
    """
    try:
        term = word.lower()
        results = PyDictionary().antonym(term)
        print("The antonym(s) of the word %s are:" % term)
        index = 0
        while index < len(results):
            print(str(index + 1) + ')' + results[index].encode('ascii'))
            index += 1
    except TypeError:
        corrected = raw_input("Re-enter the word with the correct spelling: ")
        antonym(corrected)
def dictionary(word): i=0 #while (1): print word dictionary=PyDictionary() dict=dictionary.meaning(word) if dict is not None: espeak.synth("your word is " + word) time.sleep(2.5) if ( dict.has_key('Adjective')) : s= dict['Adjective'] if len(s)>=i : print s[i] l= len(s[i]) t = l /12.0 espeak.synth("(adjective)" + s[i]) time.sleep(t) if dict.has_key('Noun') : s= dict['Noun'] if len(s)>=i : print s[i] l= len(s[0]) t = l /12.0 espeak.synth("(NOUN)" + s[i]) time.sleep(t) if dict.has_key('Verb') : s= dict['Verb'] if len(s)>=i : print s[i] l= len(s[i]) t = l /12.0 espeak.synth("VERB" + s[i]) time.sleep(t) if dict.has_key('Adverb') : s= dict['Adverb'] if len(s)>=i : print s[i] l= len(s[i]) t = l /12.0 espeak.synth("(ADVERB)" + s[i]) time.sleep(t) if dict.has_key('Preposition') : s= dict['Preposition'] if len(s)>=i : print s[i] l= len(s[i]) t = l /12.0 espeak.synth("(PREPO)" + s[i]) time.sleep(t) print 5
def dictionary(word): i=0 while (1): dictionary=PyDictionary() dict=dictionary.meaning(word) if (dict.has_key('Adjective')) : s= dict['Adjective'] if len(s)>=i : print s[i] l= len(s[i]) t = l /12.0 espeak.synth("(adjective)" + s[i]) time.sleep(t) if dict.has_key('Noun') : s= dict['Noun'] if len(s)>=i : print s[i] l= len(s[0]) t = l /12.0 espeak.synth("(NOUN)" + s[i]) time.sleep(t) if dict.has_key('Verb') : s= dict['Verb'] if len(s)>=i : print s[i] l= len(s[i]) t = l /12.0 espeak.synth("VERB" + s[i]) time.sleep(t) if dict.has_key('Adverb') : s= dict['Adverb'] if len(s)>=i : print s[i] l= len(s[i]) t = l /12.0 espeak.synth("(ADVERB)" + s[i]) time.sleep(t) if dict.has_key('Preposition') : s= dict['Preposition'] if len(s)>=i : print s[i] l= len(s[i]) t = l /12.0 espeak.synth("(PREPO)" + s[i]) time.sleep(t) espeak.synth("If alternate meaning required, give a double tap within the next 3 seconds")
def handle(text, mic, profile):
    """Speak the dictionary meanings of the last word in *text*.

    Args:
        text: the recognized utterance; only its last word is looked up.
        mic: output device exposing a .say(str) method.
        profile: user profile (unused here; required by the handler API).
    """
    words = text.split()
    # BUG FIX: the original indexed lst[len(lst)-1] before any emptiness
    # check, so an empty/whitespace-only utterance raised IndexError.
    if not words:
        return
    term = words[-1]
    dictionary = PyDictionary()
    mean = dictionary.meaning(term)
    if not mean:
        mic.say("I'm sorry I couldn't find the meaning of the word " + term)
        return
    mic.say(term)
    # Speak each part of speech followed by its definitions.
    for keys in mean:
        mic.say(keys)
        for definition in mean[keys]:
            mic.say(definition)
async def antonym(self, word):
    """Checks the dictionary for the antonyms for a given word."""
    lowered = word.lower()
    lookup = dictionary.antonym(lowered)
    try:
        # nym_text_format raises TypeError when the lookup came back empty.
        formatted = self.nym_text_format("antonyms", lookup, lowered)
        return await self.bot.say(formatted)
    except TypeError:
        return await self.bot.say("No results found. Are you searching for a valid word?")
def __init__(self):
    """Set up the offline dictionary client and the app-indicator UI."""
    #dictionary related
    self.dictionary_offline=PyDictionary()
    #appindicator related
    self.indicator=appindicator.Indicator("dictionary-indicator", gtk.STOCK_SPELL_CHECK, appindicator.CATEGORY_APPLICATION_STATUS)
    self.indicator.set_status(appindicator.STATUS_ACTIVE)
    self.indicator.set_attention_icon("Dictionary")
    # NOTE(review): self.start() presumably builds self.menu before it is
    # attached below — TODO confirm; set_menu would fail without it.
    self.start()
    self.indicator.set_menu(self.menu)
def pieceOfLyric(self, video, numWord):
    """Collect a snippet of distinctive lyric words for the MusixMatch API.

    (Translated from the original Spanish docstring:) the MusixMatch API
    only needs a fragment of the lyric containing relevant words, so this
    keeps words longer than three letters. Words are taken from the OCR'd
    lyric starting at offset 100, deduplicated, and kept only when Google's
    dictionary recognizes them.
    """
    lyric = self.scanOCR(video)
    candidates = lyric[100:].split()
    picked = ""
    taken = 0
    pos = 0
    checker = PyDictionary()
    total = len(candidates)
    while (taken <= numWord) and (pos < total):
        token = candidates[pos]
        # Keep only words of more than two letters, not already collected.
        if len(token) > 2 and token not in picked:
            # Accept the word only if Google knows a meaning for it.
            if checker.googlemeaning(token):
                picked = picked + " " + token
                taken += 1
        pos += 1
    return picked
class Synsets(object):
    """A cache of word -> synonyms, backed by live PyDictionary lookups.

    `synsets` maps strings to Word objects (project type) and is used as a
    fallback when the online lookup fails.
    """

    def __init__(self, synsets=None):
        # BUG FIX: the original used a mutable default argument ({}),
        # which would be shared by every Synsets() created without one.
        self.dictionary = PyDictionary()
        self.synsets = {} if synsets is None else synsets

    def find(self, word):
        """Return synonyms for *word*: live lookup first, cache fallback."""
        try:
            return map(str, self.dictionary.synonym(word))
        except Exception:
            # BUG FIX: the fallback referenced the bare name `synsets`
            # (a NameError at runtime); it must read the instance attribute.
            if word not in self.synsets:
                return []
            return self.synsets[word].synonyms

    def add(self, synsets):
        """Merge *synsets* (a dict of word -> Word) into the cache."""
        self.synsets.update(synsets)
def __init__(self):
    """Initialize state for the lambda-composition / intrinsic-merit pipeline.

    Sets up the trees, accumulators and the similarity backend used by the
    (externally defined) graph-tensor-neuron-network merit computation.
    """
    self.lambda_comp_tree=AVLTree()      # composition tree of lambda terms
    self.index_tree=BinaryTree()         # word-index tree
    self.word_list=[]
    self.word_dict={}
    self.index_dict={}
    self.index_list=[]
    self.lambda_expression=[]
    self.lambda_composition=""
    # Multiplicative accumulator — starts at the identity.
    self.graph_tensor_neuron_network_intrinsic_merit=1.0
    # Starts very high; presumably minimized elsewhere — TODO confirm.
    self.entropy=10000000000.0
    self.conceptnet=ConceptNet5Client()
    #self.Similarity="ConceptNet"
    self.Similarity="WordNet"            # similarity backend selector
    self.ClosedPaths=True
    self.dictionary=PyDictionary()
async def dictionary(self, word):
    """Checks the dictionary for the meanings of a given word."""
    lowered = word.lower()
    try:
        result = dictionary.meaning(lowered)
        # meaning_text_format raises TypeError when the lookup found nothing.
        sections = [self.meaning_text_format(pos, result)
                    for pos in ("Noun", "Verb", "Adjective", "Adverb")]
    except TypeError:
        return await self.bot.say("No results found. Are you searching for a valid word?")
    definition = ("Found the following definitions for **" + lowered + "**:"
                  + "\n" + "".join(sections))
    return await self.bot.say(definition)
class Meaning():
    """Writes dictionary results for a query into a meaning.txt scratch file.

    task "mn" writes meaning/synonym/antonym; task "tr" writes the Hindi
    translation. The scratch file is removed when the instance is destroyed.
    """

    def __init__(self):
        self.dictionary=PyDictionary()

    def meaning_function(self,query,task="mn"):
        # task can be "mn" (meaning) or "tr" (translate).
        # FIX: the file is now managed by a with-block so it is closed even
        # if a lookup raises.
        with open("meaning.txt","w") as fo:
            if task == "mn" :
                fo.write("Meaning :")
                fo.write(str(self.dictionary.meaning(query)))
                fo.write("Synonym :")
                fo.write(str(self.dictionary.synonym(query)))
                fo.write("Antonym :")
                fo.write(str(self.dictionary.antonym(query)))
                print (self.dictionary.meaning(query))
            elif task =="tr":
                fo.write("Translation :")
                # BUG FIX: the original wrote bytes (.encode(...)) into a
                # text-mode file (TypeError on Python 3) and had a dead
                # duplicate normalize() call whose result was discarded.
                # Normalize once, then write the ASCII-filtered string.
                ascii_text = unicodedata.normalize(
                    'NFKD', self.dictionary.translate(query,'hi')
                ).encode('ascii','ignore').decode('ascii')
                fo.write(ascii_text)
                print(self.dictionary.translate(query,'hi'))

    def __del__(self):
        # BUG FIX: the original unconditionally removed the file, raising
        # FileNotFoundError from the destructor when meaning_function was
        # never called.
        try:
            os.remove("meaning.txt")
        except OSError:
            pass
and a grounded synonym ''' try: sentence=re.findall(r"[\w']+", sentence) sentence=remove_stopwords(sentence) sentence=[root(word) for word in sentence] sentence=' '.join(sentence) except: sentence=sentence return sentence stop_words = stopwords.words('english') # upload a dictionary of stopwords, common words like 'and' wordnet_lemmatizer = WordNetLemmatizer() # there are other options, but as the language quality does not matter we will stick with what is readily available dictionary = PyDictionary() # using the python dictionary to generate synonyms. Can do better maybe syn_dict={} data='../repos/' dUK=pickle.load(open(data+'dUK.pkl','rb')) pUK=pd.DataFrame.from_dict(dUK,orient='index') pUK['SM']=pUK['SM'].apply(ground_words) pUK.to_pickle(data+'pUK.pkl') pUK['LD']=pUK['LD'].apply(ground_words)
# -*- coding: utf-8 -*- """ Created on Sun Nov 27 17:17:59 2016 @author: SRINIVAS """ from pytrends.request import TrendReq from PyDictionary import PyDictionary dictionary=PyDictionary() import time import speech_recognition as sr rfe=open('thanks.txt','r') time.sleep(1) r = sr.Recognizer() try: with sr.Microphone() as source: # use the default microphone as the audio source audio = r.listen(source) sentence=r.recognize_google(audio) print('I heard that you said:\n',sentence) except: sentence=input('Enter a sentence (we could not figure out what you said)') google_username = "******" google_password = "******" path = "" sent=[] for elem in sentence.split(' '):
return 0 def text(number,msg,ACCOUNT_SID,AUTH_TOKEN) : try : client = TwilioRestClient(ACCOUNT_SID, AUTH_TOKEN) client.messages.create( to=number, from_="+xxxxxxxxxxxxx", body = msg, ) return 1 except : return 0 kernel = aiml.Kernel() dictionary=PyDictionary() if os.path.isfile("bot_brain.brn"): kernel.bootstrap(brainFile = "bot_brain.brn") else: kernel.bootstrap(learnFiles = "std-startup.xml", commands = "load aiml b") kernel.saveBrain("bot_brain.brn") # kernel now ready for use while True: message = raw_input("Enter your message to the bot: ") if message == "quit": exit()
class ai_guesser(guesser):
    """Codenames guesser: treats each clue as a category and ranks board
    words with a naive-Bayes model trained on Wikipedia summaries (or a
    pre-tokenized summary file)."""

    # Data files: comma-separated category names, and cached category
    # summaries separated by "\n--\n--\n".
    CATEGORIES = "ai4games/categories.txt";
    WIKI_DICT_SET = "ai4games/wikiDict.txt";

    def __init__(self, brown_ic=None, glove_vecs=None, word_vectors=None):
        # brown_ic / glove_vecs / word_vectors are accepted for interface
        # compatibility with sibling guessers but are unused here.
        self.num = 0
        self.getCategories()
        self.actual_dictionary = PyDictionary()
        self.wikiDict = {}
        self.readInSummaries();
        self.cm_wordlist = []
        with open('players/cm_wordlist.txt') as infile:
            for line in infile:
                self.cm_wordlist.append(line.rstrip())
        self.classifyCategories()
        self.boardSum = {}      # board word -> tokenized summary words
        self.curGuesses = []    # pending guesses as "word|probability" strings

    def get_board(self, words):
        """Receive the board and pre-summarize every board word."""
        self.words = words
        self.readBoard(words)

    def get_clue(self, clue, num):
        """Record the clue/count pair and echo it back."""
        self.clue = clue
        self.num = num
        print("The clue is:", clue, num, sep=" ")
        li = [clue, num]
        return li

    def keep_guessing(self, clue, board):
        """Keep going while ranked guesses remain."""
        return len(self.curGuesses) > 0

    def give_answer(self):
        """Pop and return the highest-probability pending guess."""
        #add the new guesses to the list of possible guesses
        self.curGuesses.extend(self.chooseWords(self.clue, self.num, self.words))
        self.reSortGuesses()
        bestGuess = self.curGuesses.pop(0).split("|")[0]
        return bestGuess #returns a string for the guess

    ''' ALGORITHM: -train- 1. get the categories 2. get the wikipedia articles for the categories 3. use the bag of words from the articles as the training data for the categories -give response- 4. get the clue and the number 5. calculate the probabilities of each word occuring in the category 6. choose the highest x probability words '''

    #sort the guesses based on value
    def reSortGuesses(self):
        """Re-sort self.curGuesses ("word|prob" strings) by descending prob."""
        #split
        sortD = {}
        for g in self.curGuesses:
            p = g.split("|")
            sortD[str(p[0])] = float(p[1])
        #sort + reform
        newsort = []
        for k, v in sorted(sortD.items(), key=lambda item: float(item[1]), reverse=True):
            newsort.append(str(k) + "|" + str(v))
        self.curGuesses = newsort

    def getCategories(self):
        """Load and strip the comma-separated category names."""
        self.categories = open(self.CATEGORIES, "r").read().split(",")
        self.categories = list(map(lambda x: x.strip(), self.categories))

    def readInSummaries(self):
        """Load cached category summaries: "category: tokenized words"."""
        self.wikiDict = {}
        wd = open(self.WIKI_DICT_SET, "r").read()
        wd_lines = wd.split('\n--\n--\n')
        for l in wd_lines:
            if l.strip() == "":
                continue
            parts = l.split(":")
            ''' if(len(parts) != 2): print(l) '''
            c = parts[0].strip()
            s = parts[1].strip()
            self.wikiDict[c] = s.split(" ")

    #tokenize the summaries for the words on the board
    def readBoard(self, boardWords):
        """Fetch/clean a Wikipedia (or dictionary) summary per board word."""
        if(len(self.boardSum.keys()) > 0): #already done
            return
        self.boardSum = {}
        # NOTE(review): this binds the PyDictionary *class*, not an
        # instance — presumably "PyDictionary()" was intended, since
        # actual_dict.meaning(c) is called below; TODO confirm.
        actual_dict = PyDictionary
        n = 0
        for b in boardWords:
            c = b.lower()
            n+=1
            print(str(n) + "/" + str(len(boardWords)) + " board words summarized : " + c + " ", end='\r')
            try:
                p = wikipedia.summary(c)
            except wikipedia.DisambiguationError as er:
                #if this still doesn't work the library is shit and just get the definition of the word
                try:
                    #print(er.options[0:3])
                    #print(e.options[0])
                    p = wikipedia.summary(er.options[0], sentences=1)
                except:
                    defin = actual_dict.meaning(c)
                    if defin is None:
                        p = c + " word"
                    else:
                        p = max(list(defin.values()), key=len) #return longest definition
            except wikipedia.PageError:
                # NOTE(review): `p` may be unbound here (the summary() call
                # raised before assigning it) — this branch can raise
                # UnboundLocalError; looks like a copy-paste slip.
                if type(p) is list:
                    space = " "
                    p = space.join(p)
            #whatever just get the definition then
            except:
                defin = actual_dict.meaning(c)
                if defin is None:
                    p = c + " word"
                else:
                    p = max(list(defin.values()), key=len) #return longest definition
            if type(p) is list:
                space = " "
                p = space.join(p)
            #clean up this hot mess
            #print(p)
            #print(p.encode('unicode_escape'))
            words = p.split(" ")
            words = list(map(lambda x: x.lower(), words)) #lowercase
            table = str.maketrans('', '', string.punctuation)
            words = list(map(lambda x: x.translate(table), words)) #remove punctuation
            words = list(filter(lambda x: x != "", words)) #remove empty space
            summ = " ".join(words)
            words = [word for word in word_tokenize(summ) if not word in stopwords.words() and word.isalnum()]
            self.boardSum[c] = words
            #print(" ".join(words))

    #creates the occurence matrix for probabilities
    def classifyCategories(self):
        """Build per-category word counts (the naive-Bayes training data)."""
        #get bag of words
        all_words = []
        bag_of_words = {}
        self.trainTotal = 0
        self.catSet = {}
        #import it now
        if len(list(self.wikiDict.keys())) == 0:
            # No cached file: summarize every category via Wikipedia.
            n = 0
            for c in self.categories:
                n+=1
                print(str(n) + "/" + str(len(self.categories)) + " categories summarized : " + c + " ", end='\r')
                try:
                    p = wikipedia.summary(c)
                except wikipedia.DisambiguationError as er:
                    #if this still doesn't work the library is shit and just get the definition of the word
                    try:
                        #print(er.options[0:3])
                        #print(e.options[0])
                        p = wikipedia.summary(er.options[0])
                    except:
                        defin = self.actual_dictionary.meaning(c)
                        if defin is None:
                            p = c + " word"
                        else:
                            p = max(list(defin.values()), key=len) #return longest definition
                except wikipedia.PageError:
                    # NOTE(review): `p` may be unbound here, as in readBoard.
                    if type(p) is list:
                        space = " "
                        p = space.join(p)
                #whatever just get the definition then
                except:
                    defin = self.actual_dictionary.meaning(c)
                    if defin is None:
                        p = c + " word"
                    else:
                        p = max(list(defin.values()), key=len) #return longest definition
                if type(p) is list:
                    space = " "
                    p = space.join(p)
                #print(p)
                # NOTE(review): str.replace returns a new string; this
                # result is discarded, so newlines are NOT removed.
                p.replace('\n', " ")
                summ = p
                summ = summ.lower()
                artWords = [word for word in word_tokenize(summ) if not word in stopwords.words() and word.isalnum()]
                self.catSet[c] = artWords
                bag_of_words[c] = artWords
                self.trainTotal += len(artWords) #add to the whole total
                for w in artWords:
                    if w not in all_words:
                        all_words.append(w)
        #use the external file
        else:
            i = 0
            for c in self.wikiDict.keys():
                i+= 1
                print(str(i) + "/" + str(len(self.wikiDict.keys())) + " ", end='\r')
                #summ = " ".join(self.wikiDict[c])
                #artWords = [word for word in word_tokenize(summ) if not word in stopwords.words() and word.isalnum()]
                artWords = self.wikiDict[c]
                self.catSet[c] = artWords
                bag_of_words[c] = artWords
                self.trainTotal += len(artWords) #add to the whole total
                for w in artWords:
                    if w not in all_words:
                        all_words.append(w)
            print("IMPORTED WORD SET")
        #get the counts for the probabilities
        self.classifyCats = {} #dict[category][word] = #; dict[category][TOTAL_NUM] = #; dict[word+"_TOTAL"] = #
        word_cts = {}
        for w in all_words:
            word_cts[w] = 0
        #get the counts for each category and word
        for c in self.categories:
            w, cts = np.unique(bag_of_words[c], return_counts=True)
            self.classifyCats[c] = {}
            for a in all_words:
                self.classifyCats[c][a] = 0 #default to 0
                if a in w:
                    #get the count for this word in the category article
                    ind = np.where(w==a)
                    self.classifyCats[c][a] = cts[ind]
                    word_cts[a] += cts[ind] #add to the word's total count
            self.classifyCats[c]["TOTAL_NUM"] = len(bag_of_words[c])
        self.word_cts = word_cts

    #P(x) - x is a word
    def wordProb(self, x):
        return self.word_cts[x] / self.trainTotal

    #P(c) - c is a category
    def categoryProb(self, c):
        return self.classifyCats[c]["TOTAL_NUM"]/self.trainTotal

    #P(x|c) - x is a word, c is a category
    def featcategoryProb(self, x, c):
        if x not in self.classifyCats[c].keys():
            return 0
        return self.classifyCats[c][x]/self.classifyCats[c]["TOTAL_NUM"]

    #laplace smoothing instead? alternative P(x|c)
    def laplace(self, x, c):
        """Add-m smoothed P(x|c) so unseen words never zero the product."""
        m = 1 # smoothing amount (add-m)
        if x in self.classifyCats[c].keys():
            t = self.classifyCats[c][x] # number of x's for class C
        else:
            t = 0
        s = len(self.word_cts.keys()) # possible values for x
        N = self.classifyCats[c]["TOTAL_NUM"] # number of total Cs
        return float((t+m) / (N + (m*s)))

    #gets the all probabilities of a word belonging to any category c
    #P(c|x) = P(x_1|c)*P(x_2|c)*...*P(x_n|c)*P(c)
    def allCategoryProb(self, x):
        """Naive-Bayes score of board word *x* against every category."""
        artWords = self.boardSum[x]
        #calculate the probability for x belonging to each category
        catSet = {}
        for c in self.categories:
            p = 1
            for w in artWords:
                p *= self.laplace(w,c)
                #p *= self.featcategoryProb(w,c)
                #continue multiplying probabilities together
                if p == 0:
                    #if 0, cancel calculations
                    break
            p *= self.categoryProb(c)
            catSet[c] = float(p)
        return catSet

    #get the words most related to the clue (assuming the clue is a category word)
    def chooseWords(self, clue, num, boardWords):
        """Rank un-guessed board words by P(clue|word); return the top num."""
        catProbs = {}
        for b in boardWords:
            if "*" in b:   # '*' marks an already-revealed board word
                continue
            x = b.lower()
            allCats = self.allCategoryProb(x)
            #print(allCats)
            if clue in allCats:
                catProbs[x] = allCats[clue]
            else:
                catProbs[x] = 0
        outD = []
        for k, v in sorted(catProbs.items(), key=lambda item: float(item[1]),reverse=True):
            outD.append(str(k) + "|" + str(v))
            print("%s: %s" % (k, v))
        #print(outD[:num])
        #return the top x guesses
        return outD[:num]
# from pprint import pformat import asyncio from bot.client.getkey import readKey from bot.utils import msgutils, userutils, miscutils from bot.handlers import message_handler, bot_prefix, strutils from discord import Embed, NotFound, HTTPException import re from PyDictionary import PyDictionary from googletrans import Translator import urbandict import pyimgur # import sympy from PIL import ImageFile, Image dictionary = PyDictionary() translator = Translator() imgur_client = pyimgur.Imgur(readKey(1)) async def info(bot, msg, reg): em = Embed(title="Who am I?", colour=miscutils.colours['orange']) em.description = "Hi, I'm [Persimmon](https://github.com/UnsignedByte/Persimmon), a discord bot created by " + ( await userutils.get_owner(bot)).mention + "." em.add_field( name="Features", value="For information about my features do `" + bot_prefix + "help` or take a look at [my github](https://github.com/UnsignedByte/Persimmon/)!" ) await msgutils.send_embed(bot, msg, em)
def __init__(self, bot):
    """Cog setup: keep the bot reference and one shared dictionary client."""
    self.bot = bot
    # 'lxml' is passed through to PyDictionary — presumably the HTML parser
    # backend it should use; TODO confirm against the PyDictionary API.
    self.dictionary = PyDictionary('lxml')
class Game:
    """Pygame word-scramble game: shuffle a word, let the player rebuild it,
    and show the dictionary meaning on submission."""

    # NOTE(review): these are *class-level* attributes; the list-valued ones
    # (__shuffled_word_positions, __chosen, __words) are mutable and would be
    # shared across instances — fine for a single-Game app, but worth noting.
    __dictionary = PyDictionary()
    __LIGHT_TEAL = (175, 238, 238)
    __RED = (255, 0, 0)
    __word = ""                    # current answer (uppercased)
    __shuffled_word = ""           # scrambled presentation of __word
    __submission = ""              # letters the player has clicked so far
    __shuffled_word_positions = [] # click rects, parallel to __shuffled_word
    __chosen = []                  # per-letter "already clicked" flags
    __words = []                   # candidate words loaded from words.txt
    __total_words = 0              # rounds started

    def __init__(self):
        SCREEN.fill(self.__LIGHT_TEAL)
        self.__read_words()
        self.extra = 0
        self.__new_word_icon = pygame.image.load('images/next.png')
        self.new_word_rect = self.__new_word_icon.get_rect()
        self.__submit_icon = pygame.image.load('images/submit.png')
        self.submit_rect = self.__submit_icon.get_rect()
        self.__clear_icon = pygame.image.load('images/clear.png')
        self.clear_rect = self.__clear_icon.get_rect()
        self.on_click_new_word()
        self.__submit_button()
        self.__clear_button()
        self.__new_word_button()

    def __read_words(self):
        # Load the candidate word list (one word per line).
        text_file = open("words.txt", "r")
        self.__words = text_file.readlines()
        text_file.close()

    def get_meaning(self):
        """Return "WORD: first definition", or "" when nothing is found."""
        meanings = ""
        if self.__word != "":
            mean = self.__dictionary.meaning(str(self.__word))
            if mean is not None:
                for keys in mean:
                    val = mean[keys][0]
                    meanings = "" + str(self.__word) + ": " + str(val)
        return meanings

    def __get_word(self):
        # Pick a random word of <= 9 letters that has a dictionary meaning.
        # NOTE(review): random.randint's upper bound is inclusive, so
        # len(self.__words) can index one past the end (IndexError);
        # __shuffle below correctly uses len(words)-1 — presumably that was
        # intended here too (or random.randrange).
        position = random.randint(0, len(self.__words))
        self.__word = self.__words[position].strip()
        while len(self.__word) > 9 or self.get_meaning() == "":
            position = random.randint(0, len(self.__words))
            self.__word = self.__words[position].strip()
        self.__word = self.__word.upper()

    def __clear_word(self):
        # Paint over the blanks and letter tiles of the previous word.
        start_x = (2*int(W))/len(self.__word)
        for i in range(0, len(self.__word)):
            pygame.draw.line(SCREEN, self.__LIGHT_TEAL, (start_x + i*90, H/3 + 50), (start_x + i*90 + 80, H/3 + 50))
            rect = pygame.Rect(pygame.Rect(start_x + i * 90, H/2, 80, 70))
            pygame.draw.rect(SCREEN, self.__LIGHT_TEAL, rect)

    def __display_word(self):
        # Draw one clickable tile per shuffled letter and remember its rect.
        start_x = (2*int(W))/len(self.__shuffled_word)
        for i in range(0, len(self.__word)):
            rect = pygame.Rect(pygame.Rect(start_x + i*90, H/2, 80, 70))
            pygame.draw.rect(SCREEN, BLACK, rect)
            letter_font = pygame.font.SysFont('Calibri (Body)', 60)
            letter = letter_font.render(str(self.__shuffled_word[i]), False, (255, 245, 255))
            SCREEN.blit(letter, (start_x + i*90 + 20, H/2 + 10))
            self.__shuffled_word_positions.append(rect)

    def __clear_chosen(self):
        # Reset the per-letter "clicked" flags for a fresh word.
        self.__chosen.clear()
        for i in range(0, len(self.__word)):
            self.__chosen.append(False)

    def __display_blank(self):
        # Draw one underscore line per letter of the answer.
        start_x = (2*int(W))/len(self.__shuffled_word)
        for i in range(0, len(self.__word)):
            pygame.draw.line(SCREEN, (0, 0, 0), (start_x + i*90, H/3 + 50), (start_x + i*90 + 80, H/3 + 50))

    def __shuffle(self):
        # Pick a random permutation that differs from the answer itself.
        # NOTE(review): materializing all permutations is O(n!) memory;
        # workable only because words are capped at 9 letters.
        words = list(map("".join, permutations(self.__word)))
        ran = random.randint(0, len(words)-1)
        while words[ran] == self.__word:
            ran = random.randint(0, len(words)-1)
        self.__shuffled_word = words[ran]

    def __new_word_button(self):
        self.__new_word_icon = pygame.transform.scale(self.__new_word_icon, (100, 50))
        SCREEN.blit(self.__new_word_icon, (W/3 + 100, H/2+100))
        self.new_word_rect = self.__new_word_icon.get_rect(x=W/3 + 100, y=H/2+100)

    def __clear_button(self):
        self.__clear_icon = pygame.transform.scale(self.__clear_icon, (100, 50))
        SCREEN.blit(self.__clear_icon, (W/3 + 130 + 100, H/2 + 100))
        self.clear_rect = self.__clear_icon.get_rect(x=W / 3 + 130 + 100, y=H/2 + 100)

    def __submit_button(self):
        self.__submit_icon = pygame.transform.scale(self.__submit_icon, (100, 50))
        SCREEN.blit(self.__submit_icon, (W / 3 + 260 + 100, H / 2 + 100))
        self.submit_rect = self.__submit_icon.get_rect(x=W / 3 + 260 + 100, y=H / 2 + 100)

    def on_click_shuffle(self):
        """Scramble the current word and redraw tiles, blanks and flags."""
        self.__shuffle()
        self.__display_word()
        self.__display_blank()
        self.__clear_chosen()

    def on_click_new_word(self):
        """Start a new round: wipe the old word and pick/draw a fresh one."""
        if len(self.__submission) != 0:
            self.clear_submission()
        if len(self.__word) != 0:
            self.__clear_word()
        self.__shuffled_word_positions.clear()
        self.__total_words += 1
        self.__get_word()
        self.on_click_shuffle()

    def check_clicked_shuffled_letters(self, x, y):
        """Handle a click at (x, y); append the hit letter to the submission.

        Returns True when a fresh letter was taken, False otherwise (miss,
        or letter already chosen — which flashes a warning)."""
        for pos in range(0, len(self.__shuffled_word_positions)):
            if self.__shuffled_word_positions[pos].collidepoint(x, y):
                if self.__chosen[pos] is False:
                    self.__submission += self.__shuffled_word[pos]
                    self.__chosen[pos] = True
                    return True
                if self.__chosen[pos]:
                    text = FONT.render("Letter already chosen", False, (0, 0, 0))
                    SCREEN.blit(text, (W / 2, 10))
                    pygame.display.update()
                    pygame.time.delay(800)
                    pygame.draw.rect(SCREEN, self.__LIGHT_TEAL, (W / 2, 0, 240, 30))
                    pygame.display.update()
                    return False
        return False

    def display_meaning(self, correct):
        """Pop up a Tk message box with the word's meaning."""
        meaning = self.get_meaning()
        if correct:
            title = "Yay!! Correct Submission :D "
        else:
            title = "You learned a new word :) "
        root = Tk()
        root.withdraw()
        if meaning == "":
            meaning += self.__word
        messagebox.showinfo(title, str(meaning))

    def __display_incorrect(self):
        # Flash an "INCORRECT" banner for 800 ms, then erase it.
        text = FONT.render("INCORRECT", False, (0, 0, 0))
        SCREEN.blit(text, (W/2, 10))
        pygame.display.update()
        pygame.time.delay(800)
        pygame.draw.rect(SCREEN, self.__LIGHT_TEAL, (W/2, 0, 150, 30))
        pygame.display.update()

    def submit_status(self):
        """Check the submission; True and next word on a match, else False."""
        if self.__submission == self.__word:
            self.display_meaning(True)
            self.on_click_new_word()
            return True
        else:
            self.__display_incorrect()
            self.clear_submission()
            return False

    def update_display(self):
        """Redraw the submission row and strike out the used tiles."""
        start_x = (2*int(W))/len(self.__shuffled_word)
        for i in range(0, len(self.__submission)):
            pygame.draw.rect(SCREEN, BLACK, pygame.Rect(start_x + i * 90, H / 3 - 30, 80, 70))
            letter_font = pygame.font.SysFont('Calibri (Body)', 60)
            letter = letter_font.render(str(self.__submission[i]), False, WHITE)
            SCREEN.blit(letter, (start_x + i * 90 + 20, H / 3 - 20))
        for i in range(0, len(self.__chosen)):
            if self.__chosen[i]:
                pygame.draw.line(SCREEN, self.__RED, (start_x + i * 90, H / 2), (start_x + i * 90 + 80, H / 2 + 70))

    def clear_submission(self):
        """Erase the submission row and make every letter clickable again."""
        start_x = (2*int(W))/len(self.__shuffled_word)
        for i in range(0, len(self.__submission)):
            pygame.draw.rect(SCREEN, self.__LIGHT_TEAL, pygame.Rect(start_x + i * 90, H / 3 - 30, 80, 70))
        for i in range(0, len(self.__chosen)):
            self.__chosen[i] = False
        self.__submission = ""
        self.__display_word()

    def get_total(self):
        """Number of rounds started so far."""
        return self.__total_words
def __init__(self, query, get_synonyms=False):
    """Look up *query* and populate self.meaning and self.synonyms.

    BUG FIX: the original called self.get_synonyms(query) unconditionally
    and then, when the get_synonyms flag was True, called it a SECOND time
    and threw the first result away — a redundant (and potentially slow,
    network-bound) duplicate lookup. One call produces the same observable
    state: self.synonyms is always populated, flag or not.
    """
    self.dictionary = PyDictionary()
    self.meaning = self.dictionary.meaning(query)
    self.synonyms = self.get_synonyms(query)
import requests
from bs4 import BeautifulSoup
import wikipedia
from collections import Counter
from PyDictionary import PyDictionary
import warnings
import emoji

# Shared dictionary client and warning suppression for the scrape below.
dictionary = PyDictionary()
# NOTE(review): catch_warnings() outside a `with` block creates and discards
# the context manager — it has no effect; only simplefilter below acts.
warnings.catch_warnings()
warnings.simplefilter("ignore")
features = "html.parser"

# Define the holders where we will place the top 3 terms when we find them
termOne = "Stephen King"
termTwo = "Lansdale, PA"
termThree = "chairman of the fed"

web_url = 'https://www.theatlantic.com/magazine/archive/2020/04/how-to-destroy-a-government/606793/'
# web_url = input('Enter article url: ')
#content_id = input('Enter content ID from HTML: ')

# Fetch and parse the article body.
page = requests.get(web_url)
html_page = page.content
soup = BeautifulSoup(html_page, 'lxml')
#good_content = soup.find(id=content_id)
#good_content = soup.find('p', {'itemprop': 'articleBody'})
def TaskExecution():
    """Main command loop of the 'Alice' voice assistant.

    Greets the user via wish(), then repeatedly listens with takecommand()
    and dispatches on keywords found in the recognised text.  Returns when
    the user says 'go to sleep'; exits the process on 'goodbye', 'shutdown'
    or 'restart'.  Windows-only: relies on os.startfile, taskkill and
    hard-coded C:\\ application paths.
    """
    wish()
    while True:
        query = takecommand()

        # <<<<<<<<<<<<<<<<<<<<<<<<<<<<Logic Building to perform tasks>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
        date = datetime.datetime.today().strftime("%I:%M %p")  # current clock time, despite the name
        if "time now" in query:
            speak("The time is now " + date + "")
        elif 'joke' in query or 'funny' in query:
            speak(pyjokes.get_joke())
        elif 'open google' in query or 'search in google' in query:
            speak("What should i search on Google")
            google = takecommand().lower()
            webbrowser.open(f'www.google.com/search?q=' + google)
            speak("Searching in google...")
        elif 'open bing' in query or 'search in bing' in query:
            speak("What should i search on Bing")
            bing = takecommand().lower()
            webbrowser.open(f'www.bing.com/search?q=' + bing)
            speak("Searching in Bing...")
        elif 'open duckduckgo' in query or 'search in duckduckgo' in query:
            speak("What should i search on DuckDuckGo")
            duck = takecommand().lower()
            webbrowser.open(f'www.duckduckgo.com/search?q=' + duck)
            speak("Searching in DuckDuckGo...")
        elif 'open youtube' in query:
            speak("What do you want me to play")
            youtube = takecommand().lower()
            pywhatkit.playonyt(youtube)
            speak("Playing...")
        elif "my ip" in query:
            ip = get('https://api.ipify.org').text
            speak(f"Your ip address is {ip}")
        elif 'open wikipedia' in query:
            speak("What do you want to know from Wikipedia?")
            wiki = takecommand().lower()
            info = wikipedia.summary(wiki, 2)  # two-sentence summary
            speak("According to Wikipedia")
            speak(info)
        elif "open notepad" in query:
            npath = "C:\\Windows\\system32\\notepad.exe"
            os.startfile(npath)
        elif "open cmd" in query:
            os.system("start cmd")
        elif 'open task manager' in query:
            tpath = "C:\\Windows\\system32\\Taskmgr.exe"
            os.startfile(tpath)
        elif "open steam" in query:
            spath = "C:\\Program Files (x86)\\Steam\\steam.exe"
            os.startfile(spath)
        elif "open epic games" in query:
            epath = "C:\\Program Files (x86)\\Epic Games\\Launcher\\Portal\\Binaries\\Win32\\EpicGamesLauncher.exe"
            os.startfile(epath)
        elif "open browser" in query:
            bpath = "C:\\Program Files (x86)\\Microsoft\\Edge Dev\\Application\\msedge.exe"
            os.startfile(bpath)
            speak("Opening Edge...")
        elif 'developer' in query or 'made you' in query:
            speak("My Developer is Niaz Mahmud Akash and Jalish Mahmud Sujon")
        elif 'thanks' in query or 'thank you' in query or 'thanks a lot' in query:
            thanks = ["Glad to help you.", "Happy to help", "You're welcome"]
            thanks_random = random.choice(thanks)
            speak(thanks_random)
        elif 'browser' in query:
            webbrowser.open_new('www.google.com')
            speak("Opening Browser...")
        elif 'open facebook' in query:
            webbrowser.open('www.facebook.com')
            speak("Opening Facebook...")
        elif 'open twitter' in query:
            webbrowser.open('www.twitter.com')
            speak("Opening Twitter...")
        elif 'open telegram' in query:
            webbrowser.open('https://web.telegram.org/')
            speak("Opening Telegram...")
        elif 'open youtube' in query:
            # NOTE(review): unreachable -- the earlier 'open youtube' branch
            # always matches first.
            webbrowser.open('www.youtube.com')
            speak("Opening Youtube...")
        elif 'open play store' in query:
            webbrowser.open('https://play.google.com/store/apps')
            speak("Opening Google PlayStore...")
        elif 'open instagram' in query:
            webbrowser.open('www.instagram.com')
            speak("Opening Instagram...")
        elif 'love me' in query:
            love = [
                "Of course. You're one of a kind.",
                "Is that a thing to ask? Of course I LOVE YOU ❤",
                "Yes, in an at-your-service sor of way"
            ]
            love_random = random.choice(love)
            speak(love_random)
        elif 'i love you' in query:
            express = [
                "That's so amazing to hear", "I LOVE ME Too!",
                "If I was you, I'd love me too.",
                "Of course you do! Friends don't hate each other."
            ]
            express_random = random.choice(express)
            speak(express_random)
        elif 'will you go on a' in query:
            go = [
                "Sure. Just let me know the place and time",
                "I'd go anywhere you take me"
            ]
            go_random = random.choice(go)
            speak(go_random)
        elif 'you robot' in query or 'are you human' in query:
            speak(
                "Yes, I am a Robot but a smart one. Let me prove it to you. How can i help you?"
            )
        elif 'your name' in query:
            speak("My name is Alice. I am your virtual personal assistant.")
        elif 'how are you' in query or 'hows things' in query or 'how you doing' in query:
            how = ["I am fine. What about you?", "I am good. How are you?"]
            how_random = random.choice(how)
            speak(how_random)
        elif 'marry me' in query:
            marry = [
                "This is one of those things we both have to agree on. I'd prefer to keep our relationship friendly. Romance makes me incredibly awkward",
                "It's not possible"
            ]
            marry_random = random.choice(marry)
            speak(marry_random)
        elif 'about nidhi' in query:
            speak("She can suck my pussy")
        elif 'happy' and 'valentines' in query:
            # NOTE(review): `'happy' and 'valentines' in query` only tests
            # `'valentines' in query` -- the literal 'happy' is always truthy.
            # Probably meant: 'happy' in query and 'valentines' in query.
            speak("Happy Valentines Day.")
        elif 'mothers name' in query or 'your mother' in query:
            mname = [
                "I have no mother. I am an Ai", "Every user is my family",
                "It takes a village to raise a virtual assistant"
            ]
            mname_random = random.choice(mname)
            speak(mname_random)
        elif 'your boss' in query:
            speak("You are")
        elif 'where am' in query or 'location' in query or 'where are we' in query:
            location()
        elif 'take a screenshot' in query or 'screenshot' in query:
            speak('What should be the name of this screenshot?')
            name = takecommand().lower()
            speak('Taking Screenshot')
            time.sleep(2)
            img = pyautogui.screenshot()
            img.save(f"{name}.png")
            speak('Screenshot Saved')
        elif 'fact' in query or 'facts' in query:
            x = randfacts.getFact()
            speak(x)
        elif 'annoying' in query or 'you suck' in query:
            dtalk = [
                "I am sorry", "You can report about me in GitHub",
                "Sorry, i am just an ai"
            ]
            dtalk_random = random.choice(dtalk)
            speak(dtalk_random)
        elif 'youre cute' in query or 'smart' in query or 'you are cute' in query or 'you are creepy' in query:
            cute = [
                "Thank you", "Thanks", "Thanks, that means a lot",
                "Much obliged!", "Well, that makes two of us!"
            ]
            cute_random = random.choice(cute)
            speak(cute_random)
        elif 'you live' in query or 'your home' in query:
            live = [
                "I live in your computer",
                "I live in a place filled with games",
                "I live in Servers of Github", "I live in the internet"
            ]
            live_random = random.choice(live)
            speak(live_random)
        elif 'news' in query:
            speak("Sure. Getting News...")
            news()
        elif 'system' and 'report' in query or 'system' and 'status' in query:
            # NOTE(review): same always-truthy-literal bug as the valentines
            # branch -- this fires on any query containing 'report' or 'status'.
            battery = psutil.sensors_battery()
            cpu = psutil.cpu_percent(interval=None)
            percentage = battery.percent
            speak(
                f"All Systems are running. Cpu usage is at {cpu} percent. We have {percentage} percent battery."
            )
        elif 'like me' in query:
            like = [
                "Yes, I like you", "I like you well enough so far",
                "Of Course", "I don't hate you"
            ]
            like_random = random.choice(like)
            speak(like_random)
        elif 'what are you doing' in query or 'thinking' in query:
            think = [
                "Thinking about my future",
                "I am trying to figure out what came first? Chicken or egg.",
                "Algebra",
                "I plan on waiting here quietly until someone asks me a question"
            ]
            think_random = random.choice(think)
            speak(think_random)
        elif 'about me' in query:
            speak("You're Intelligent and ambitious")
        elif 'dictionary' in query:
            # Inner loop: keep looking words up until the user says close/exit.
            speak("Dictionary Opened")
            while True:
                dinput = takecommand()
                try:
                    if 'close' in dinput or 'exit' in dinput:
                        speak("Dictionary Closed")
                        break
                    else:
                        dictionary = PyDictionary(dinput)
                        speak(dictionary.getMeanings())
                except Exception as e:
                    speak("Sorry, I am not able to find this.")
        elif 'date' in query or 'day' in query:
            x = datetime.datetime.today().strftime("%A %d %B %Y")
            speak(x)
        elif 'zodiac' in query:
            zodiac()
        elif 'horoscope' in query:
            speak("Do you know your Zodiac Sign?")
            z = takecommand().lower()
            if 'no' in z:
                zodiac()
            elif 'yes' in z or 'yeah' in z:
                speak("What is your Zodiac Sign?")
                sign = takecommand()
                speak(
                    "Do you want to know the horoscope of today, tomorrow or yesterday?"
                )
                day = takecommand()
                params = (('sign', sign), ('day', day))
                response = requests.post('https://aztro.sameerkumar.website/',
                                         params=params)
                # NOTE(review): rebinding `json` here shadows any imported
                # json module for the rest of this loop iteration.
                json = response.json()
                print("Horoscope for", json.get('current_date'), "\n")
                speak(json.get('description'))
                print('\nCompatibility:', json.get('compatibility'))
                print('Mood:', json.get('mood'))
                print('Color:', json.get('color'))
                print('Lucky Number:', json.get('lucky_number'))
                print('Lucky Time:', json.get('lucky_time'), "\n")

        # How to Do Mode
        elif 'activate how to' in query:
            speak("How to mode is activated.")
            while True:
                speak("Please tell me what do you want to know?")
                how = takecommand()
                try:
                    if 'exit' in how or 'close' in how:
                        speak("How to do mode is closed")
                        break
                    else:
                        max_results = 1
                        how_to = search_wikihow(how, max_results)
                        assert len(how_to) == 1
                        how_to[0].print()
                        speak(how_to[0].summary)
                except Exception as e:
                    speak("Sorry. I am not able to find this")
        elif 'temperature' in query or 'weather today' in query:
            temperature()

        # Little Chitchat
        elif 'hello' in query or 'hi' in query or 'hey' in query:
            speak("Hello, How are you doing?")
            reply = takecommand().lower()
            if 'what' and 'about' and 'you' in reply:
                # NOTE(review): only `'you' in reply` is actually tested
                # (the literal strings are always truthy).
                how2 = ["I am fine.", "I am good."]
                how2_random = random.choice(how2)
                speak(how2_random)
            elif 'not good' in reply or 'bad' in reply or 'terrible' in reply:
                speak("I am sorry to hear that. Everything will be okay.")
            elif 'great' in reply or 'good' in reply or 'excellent' in reply or 'fine' in reply:
                speak("That's great to hear from you.")
        elif 'help' in query or 'what can you do' in query or 'how does it work' in query:
            do = [
                "I can tell you Time", "Joke", "Open browser",
                "Open Youtube/Facebook/Twitter/Telegram/Instagram",
                "Open or Close applications", "Search in Wikipedia",
                "Play videos in Youtube",
                "Search in Google/Bing/DuckDuckGo", "I can calculate",
                "Learn how to make or do something.", "Switch Window",
                "Play news", "Tell you about interesting facts",
                "Temperature of you current location", "Can take Screenshot",
                "Can find your location", "Shutdown/Restart Computer",
                "Horoscope", "Dictionary", "Zodiac Sign Calculator",
                "System Report"
            ]
            for does in do:
                speak(does)
        elif 'introduce yourself' in query or 'who are you' in query:
            speak(
                "I am Alice. Your personal virtual Assistant. Developed by Jalish Mahmud Sujon and Niaz Mahmud Akash in 2021."
            )
        elif 'go to sleep' in query:
            speak("Sleep mode activated. If you need me just say Wake up.")
            break
        elif 'goodbye' in query:
            speak("Good bye. Have a Lovely day.")
            sys.exit()

        # To close Applications
        elif 'shutdown' in query:
            speak("Shutting Down")
            os.system("shutdown /s /t 5")
            sys.exit()
        elif 'restart' in query:
            speak("Restarting Computer")
            os.system("shutdown /r /t 5")
            sys.exit()
        elif "close notepad" in query:
            speak("Closing Notepad")
            os.system("taskkill /f /im notepad.exe")
        elif "close browser" in query:
            speak("Closing Browser")
            os.system("taskkill /f /im edge.exe")
        elif "close steam" in query:
            speak("Closing Steam")
            os.system("taskkill /f /im steam.exe")
        elif "close epic games" in query:
            speak("Closing Epic Games")
            os.system("taskkill /f /im EpicGamesLauncher.exe")
        elif 'close task manager' in query:
            speak("Closing Task Manager")
            os.system("taskkill /f /im Taskmgr.exe")

        # Switch Window
        elif 'switch window' in query or 'switch the windows' in query or 'switch windows' in query:
            pyautogui.keyDown("alt")
            pyautogui.press("tab")
            time.sleep(1)
            pyautogui.keyUp("alt")

        # <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<Calculator Function>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
        elif 'do some calculations' in query or 'calculate' in query or 'open calculator' in query:
            try:
                r = sr.Recognizer()
                with sr.Microphone() as source:
                    speak("What you want to calculate? Example 6 plus 6")
                    print("listening...")
                    r.adjust_for_ambient_noise(source)
                    audio = r.listen(source)
                my_string = r.recognize_google(audio)
                print(my_string)

                def get_operator_fn(op):
                    # Map the spoken operator token to its arithmetic function.
                    return {
                        '+': operator.add,  # Plus
                        '-': operator.sub,  # Minus
                        'x': operator.mul,  # Multiplied by
                        'divided by': operator.__truediv__,  # Divided by
                    }[op]

                def eval_binary_expr(op1, oper, op2):  # 5 plus 8
                    op1, op2 = float(op1), float(op2)
                    return get_operator_fn(oper)(op1, op2)

                speak("Your Result is")
                speak(eval_binary_expr(*(my_string.split())))
            except Exception:
                speak("Sorry i didn't catch that. Please try again")
def project(fileName, imageName, translate=False, meaning=False, thesa=False, dest="en", form="text"):
    """OCR an image into *fileName*, optionally appending translations and meanings.

    Args:
        fileName: output text file (truncated first, then appended to).
        imageName: image handed to imagePreProcess for text extraction.
        translate: when True, append a translation into *dest* plus a
            back-translation to English; with form == "image" the translation
            is rendered to translation.png instead.
        meaning: when True, look up each extracted word with PyDictionary and
            write (form == "text") or render (form == "image") its meanings.
        thesa: unused -- kept for interface compatibility.  TODO confirm.
        dest: googletrans destination language code.
        form: "text" or "image" output mode.
    """
    # Truncate any previous output, then reopen in append mode.
    fileToWrite = open(fileName, "w+")
    fileToWrite.close()
    new = imagePreProcess(imageName)
    result = new.getResult()
    fileToWrite = open(fileName, "a")
    fileToWrite.write(result)
    if translate == True:
        translator = Translator()
        translatedResult = translator.translate(result, dest=dest)
        # Back-translation so the caller can sanity-check the round trip.
        orig = translator.translate(translatedResult.text, dest="en")
        if form == "text":
            fileToWrite.write(
                "\n\n\n=======================================================================\n\n\n"
            )
            fileToWrite.write("The above code's translation in " + dest +
                              " is given below:")
            fileToWrite.write(
                "\n\n\n=======================================================================\n\n\n"
            )
            fileToWrite.write(translatedResult.text)
            fileToWrite.write(
                "\n\n\n=======================================================================\n\n\n"
            )
            fileToWrite.write(
                "The above code's translation in the original language is given below:"
            )
            fileToWrite.write(
                "\n\n\n=======================================================================\n\n\n"
            )
            fileToWrite.write(orig.text)
        if form == "image":
            # Render the translation onto a white 1280x720 canvas, one
            # source line per 20px row.
            img = Image.new('RGB', (1280, 720), color='white')
            fnt = ImageFont.truetype(
                '/usr/share/fonts/truetype/msttcorefonts/Arial.ttf', 20)
            d = ImageDraw.Draw(img)
            ko = translatedResult.text.split("\n")
            for j in range(len(ko)):
                d.text((2, j * 20), ko[j], font=fnt, fill=(0, 0, 0))
            img.save('translation.png')
    if meaning == True:
        result = result.split(" ")
        print("len result " + str(len(result)))
        print(result)
        iteratorResult = result
        for i in range(len(iteratorResult)):
            # Keep only the text before the first '.' of each token.
            result = iteratorResult[i].split(".")[0]
            dfThes = PyDictionary()
            spellCheck = enchant.Dict("en_US")
            punctuation = [".", "?", "!", ",", "''"]  # NOTE(review): unused
            if spellCheck.check(result) == False:
                # Misspelled word: substitute a randomly chosen suggestion.
                result = spellCheck.suggest(result)
                indexMean = random.randint(0, len(result) - 1)
                print(indexMean)
                result = result[indexMean]
            index = dfThes.meaning(result)
            if form == "text":
                fileToWrite.write(result)
                fileToWrite.write("\nmeaning: \n")
            print(index)
            k = 0
            if index != None:
                # NOTE(review): this `i` shadows the outer numeric loop index
                # (harmless here -- the outer index is only read above).
                for i in index:
                    if form == "image":
                        img = Image.new('RGB', (600, 300), color='white')
                        d = ImageDraw.Draw(img)
                        d.text((2, 0), result, fill=(0, 0, 0))
                        d.text((2, 20), i, fill=(0, 0, 0))
                    meaningWord = index[i]
                    if form == "text":
                        fileToWrite.write("\n\n")
                        fileToWrite.write(i)
                        fileToWrite.write(": ")
                    for j in range(len(meaningWord)):
                        if form == "text":
                            fileToWrite.write(meaningWord[j])
                            fileToWrite.write(", ")
                        if form == "image":
                            d.text((2, (j + 2) * 20), meaningWord[j],
                                   fill=(0, 0, 0))
                    if form == "image":
                        img.save('resulted' + str(k) + '.png')
                        k += 1
    fileToWrite.close()
def search(search_query):
    """Rank stored pages in the `webSE` Mongo database against *search_query*.

    The query is cleaned, expanded with spelling suggestions and synonyms,
    stemmed to roots, then scored against each document's title roots and
    keyword roots.  Returns a dict mapping url -> page title for every page
    with a positive combined score, inserted best match first.
    """
    spell_checker = enchant.Dict("en_UK")  # renamed: original shadowed builtin `dict`
    dictionary = PyDictionary()
    print(search_query)
    clean_query = cleaning(search_query)
    synonyms_final = []
    # Collect synonyms for every correctly-spelled query word.
    for word in clean_query:
        if spell_checker.check(word):
            synonyms = dictionary.synonym(word)
            # BUG FIX: PyDictionary.synonym() returns None when no synonyms
            # are found; `synonyms_final + None` raised TypeError.
            if synonyms:
                synonyms_final = synonyms_final + synonyms
    # Expand the query with spelling suggestions.  Iterating a slice copy
    # means appending to clean_query cannot loop forever.
    length_clean_query = len(clean_query)
    for word in clean_query[:length_clean_query + 1]:
        for suggestion in spell_checker.suggest(word):
            clean_query.append(suggestion)
    print(clean_query)
    # Stem query words and synonyms to their roots.
    clean_query_root = [stem(word) for word in clean_query]
    synonyms_final_root = [stem(word) for word in synonyms_final]
    clean_query_root = clean_query_root + synonyms_final_root
    title_rank = {}
    keyword_rank = {}
    client = MongoClient()
    db = client.webSE
    # Score each document's title roots against the expanded query roots.
    for doc in db.keyword.find({}):
        title_match_value = 0
        for index, title_keyword_root in enumerate(doc['title_root']):
            for query_keyword_root in clean_query_root:
                if title_keyword_root == query_keyword_root:
                    title_match_value = title_match_value + doc[
                        'title_relative'][index]
        title_rank[doc['url']] = title_match_value
    print(title_rank)
    # Score each document's body keyword roots the same way.
    for doc in db.keyword.find({}):
        keyword_match_value = 0
        for index, keyword_root in enumerate(doc['keyword_root']):
            for query_keyword_root in clean_query_root:
                if keyword_root == query_keyword_root:
                    keyword_match_value = keyword_match_value + doc[
                        'keyword_relative'][index]
        keyword_rank[doc['url']] = keyword_match_value
    # Combine the two scores; .get() keeps this robust if a url only
    # appeared in the title pass.
    combined_rank_dict = {}
    for key, value in title_rank.items():
        combined_rank_dict[key] = keyword_rank.get(key, 0) + value
    # Keep only positive scores, best first (dicts preserve insertion order).
    combined_rank_dict_sorted = {}
    for key, value in sorted(combined_rank_dict.items(),
                             key=operator.itemgetter(1),
                             reverse=True):
        if value > 0:
            combined_rank_dict_sorted[key] = value
    combined_rank_sorted = list(combined_rank_dict_sorted)
    docs = db.data.find({'url': {'$in': combined_rank_sorted}})
    final_link_title_dict = {}
    doc_dict = {}
    for doc in docs:
        doc_dict[doc['url']] = doc['title']
    # Emit url -> title in rank order.
    for link in combined_rank_sorted:
        for (key, value) in doc_dict.items():
            if link == key:
                final_link_title_dict[key] = value
    return final_link_title_dict

#search("")
nltk.download('wordnet') from nltk.corpus import wordnet #for index, name in enumerate(sr.Microphone.list_microphone_names()): # print("Microphone with name \"{1}\" found for `Microphone(device_index={0})`".format(index, name)) r = sr.Recognizer() global text print('If you want to stop this service, please say stop.') print('Please start talking...') dictionary=PyDictionary() for i in range(100000000000000): with sr.Microphone() as source: r.adjust_for_ambient_noise(source) audio = r.listen(source) try: global text text = r.recognize_google(audio)
import speech_recognition as sr import pyttsx3 import pywhatkit import datetime import wikipedia import pyjokes import sys from PyDictionary import PyDictionary import pyautogui from PIL import Image from pytesseract import * pytesseract.tesseract_cmd = r'C:\Program Files\Tesseract-OCR\tesseract.exe' dict = PyDictionary() listener = sr.Recognizer() engine = pyttsx3.init() voices = engine.getProperty('voices') engine.setProperty('voice', voices[0].id) engine.say('Hello there, how are you doing today') def talk(text): engine.say('Listening now..') engine.say(text) engine.runAndWait() def take_command(): try: with sr.Microphone() as source:
def dictionary(word): if word == "": ad.tts("Didn't get the word") return; d = enchant.Dict("en_GB") if not d.check(word): word = d.suggest(word)[0] if word[-1] == '.': word= word[0:-1] i=0 print word dictionary=PyDictionary() dict=dictionary.meaning(word) while (1): c=0 if dict is not None: ad.tts("your word is " + word) if ( dict.has_key('Adjective')) : s= dict['Adjective'] if len(s)>i : print s[i] ad.tts("adjective, " + s[i]) c=1 if dict.has_key('Noun') : s= dict['Noun'] if len(s)>i : print s[i] ad.tts("Noun, " + s[i]) c=1 if dict.has_key('Verb') : s= dict['Verb'] if len(s)>i : print s[i] ad.tts("Verb, " + s[i]) c=1 if dict.has_key('Adverb') : s= dict['Adverb'] if len(s)>i : print s[i] ad.tts("Adverb, " + s[i]) c=1 if dict.has_key('Preposition') : s= dict['Preposition'] if len(s)>=i : print s[i] ad.tts("Preposition, " + s[i]) c=1 i=i+1 if c==0: ad.tts("sorry, no more meanings available") break else: ad.tts("sorry, the meaning is not available") break ad.tts("Do you want an alternate meaning?" ) while (1): cmmd=ad.stt() if cmmd == None: continue elif ad.find(cmmd, "yes") or ad.find(cmmd, "yeah"): break elif ad.find(cmmd, "no"): return; return;
from PyDictionary import PyDictionary

dictionary = PyDictionary()
# NOTE(review): getMeanings()/getSynonyms() operate on the word list given to
# the PyDictionary constructor, but this instance was built with no words, so
# they presumably return empty results -- verify.  Also confirm
# printMeanings accepts a word argument in the installed PyDictionary version,
# and note "flemme" is French while PyDictionary looks up English words.
print(dictionary.printMeanings("flemme"))
print(dictionary.getMeanings())
print(dictionary.getSynonyms())
def command():
    """One request/response round of the 'Reboot' Tk + SAPI voice assistant.

    Reads user settings from config.txt (line 0: display name, 1: spoken
    name, 2: city, 3: email address, 4: email password), greets the user,
    listens once via the microphone, and dispatches on keywords: weather,
    email, text-file creation, self-introduction, dictionary lookup, or a
    Google search.  Updates the module-level Tk widgets lbl1/lbl2/window.
    """
    r = sr.Recognizer()
    import win32com.client as wincl
    speak = wincl.Dispatch("SAPI.SpVoice")
    configDoc = open("config.txt", "r")
    configLines = configDoc.readlines()
    nameText = configLines[0]
    nameAudio = configLines[1]
    email = configLines[3]
    emailPassword = configLines[4]
    city_name = configLines[2]
    WTString = ''
    # NOTE(review): no-op -- str.replace returns a new string and the result
    # is discarded; nameAudio keeps its trailing newline.
    nameAudio.replace('\n', ' ')
    dictionary = PyDictionary()
    # These parenthesis-free commas build tuples, which ''.join() then fuses.
    welcomeText = "Welcome back ", nameText, ". What would you like me to do?"
    WTString = WTString.join(welcomeText)
    lbl1.configure(text=WTString.replace('\n', ''))
    welcomeAudio = "Welcome back ", nameAudio, ". What would you like me to do?"
    speak.Speak(welcomeAudio)
    window.update()
    print(WTString.replace('\n', ''))
    with sr.Microphone() as source:
        r.adjust_for_ambient_noise(source)
        commandAudio = r.listen(source)
        welcomeCommand = r.recognize_google(commandAudio)
    print(welcomeCommand)
    lbl2.configure(text=welcomeCommand)
    window.update()
    UIText2 = welcomeCommand
    if 'weather' in welcomeCommand:
        # NOTE(review): hard-coded API key checked into source -- should be
        # moved to config and rotated.
        API_key = "c6269ea0f84111e5cc382078107f1a83"
        base_url = "http://api.openweathermap.org/data/2.5/weather?"
        Final_url = base_url + "appid=" + API_key + "&q=" + city_name
        weather_data = requests.get(Final_url).json()
        temp = weather_data['main']['temp']
        temp -= 273.15  # Kelvin -> Celsius
        temp1 = int(temp)
        tempAudio = 'The temperature in ' + city_name + ' right now is ' + str(
            temp1) + ' degrees Celsius.'
        print(tempAudio)
        speak.Speak(tempAudio)
        lbl1.configure(text=tempAudio)
        window.update()
        UIText1 = tempAudio
    elif 'email' in welcomeCommand:
        port = 465
        context = ssl.create_default_context()
        speak.Speak("What do you want your message to be?")
        print("What do you want your message to be?")
        UIText1 = "What do you want your message to be?"
        lbl1.configure(text=UIText1)
        window.update()
        with sr.Microphone() as source:
            r.adjust_for_ambient_noise(source)
            messageAudio = r.listen(source)
            message = r.recognize_google(messageAudio)
        print(message)
        lbl2.configure(text=message)
        window.update()
        UIText2 = message
        speak.Speak("Who is receiving the email?")
        print("Who is receiving the email?")
        UIText1 = "Who is receiving the email?"
        # NOTE(review): missing text= keyword -- this passes the string as
        # configure()'s positional cnf argument, not as the label text.
        lbl1.configure(UIText1)
        window.update()
        with sr.Microphone() as source:
            r.adjust_for_ambient_noise(source)
            emailAudio = r.listen(source)
            receivingEmail = r.recognize_google(emailAudio)
        receivingEmail = ''.join(receivingEmail.split())
        # NOTE(review): replaces EVERY occurrence of "at", not just the
        # spoken separator -- e.g. "nathan" becomes "n@han".
        if "at" in receivingEmail:
            receivingEmail = receivingEmail.replace("at", "@")
        print(receivingEmail)
        UIText2 = receivingEmail
        lbl2.configure(text=receivingEmail)
        window.update()
        with smtplib.SMTP_SSL("smtp.gmail.com", port,
                              context=context) as server:
            server.login(email, emailPassword)
            server.sendmail(
                email, receivingEmail, message +
                " Sent from Reboot, a virtual assistant made for PC.")
        messageAudio = 'Message sent.'
        speak.Speak(messageAudio)
        print(messageAudio)
        UIText1 = messageAudio
        lbl1.configure(text=messageAudio)
    elif 'create' and 'text' in welcomeCommand:
        # NOTE(review): `'create' and 'text' in welcomeCommand` only tests
        # 'text' -- the literal 'create' is always truthy.
        speak.Speak("What is the name of the text file?")
        print("What is the name of the text file?")
        lbl1.configure(text="What is the name of the text file?")
        window.update()
        with sr.Microphone() as source:
            r.adjust_for_ambient_noise(source)
            dNameAudio = r.listen(source)
            docName = r.recognize_google(dNameAudio)
        print(docName)
        lbl2.configure(text=docName)
        window.update()
        doc = open(docName + ".txt", "w+")
        doc.close()
        speak.Speak("Document created.")
        print("Document created.")
        lbl1.configure(text="Document created.")
        window.update()
        speak.Speak("What would you like to write in it?")
        print("What would you like to write in it?")
        lbl1.configure(text="What would you like to write in it?")
        window.update()
        with sr.Microphone() as source:
            r.adjust_for_ambient_noise(source)
            dTextAudio = r.listen(source)
            docText = r.recognize_google(dTextAudio)
        print(docText)
        lbl2.configure(text=docText)
        window.update()
        doc = open(docName + ".txt", "w")
        doc.write(docText)
        doc.close()
    elif 'who' and 'you' in welcomeCommand:
        # NOTE(review): same always-truthy-literal bug -- only 'you' is tested.
        print(
            "My name is Reboot. I am a new virtual assistant for your PC. The problem with PC virtual assistants, are that they lack in features and are not easy to use. This then drives us away from using them. Think about it - when was the last time you used Siri on your Mac, or Cortana on your Windows PC? Reboot solves these issues. So far I am only in early beta stages, however, I will get new features when my creator can make them, and I am also open source, meaning that people can edit my code and make custom features for their needs. To summarise, I am called Reboot, as I am rebooting the virtual assistant, on your PC."
        )
        lbl1.configure(
            text=
            "My name is Reboot. I am a new virtual assistant for your PC. The problem with PC virtual assistants, are that they lack in features and are not easy to use. This then drives us away from using them. Think about it - when was the last time you used Siri on your Mac, or Cortana on your Windows PC? Reboot solves these issues. So far I am only in early beta stages, however, I will get new features when my creator can make them, and I am also open source, meaning that people can edit my code and make custom features for their needs. To summarise, I am called Reboot, as I am rebooting the virtual assistant, on your PC."
        )
        window.update()
        speak.Speak(
            "My name is Reboot. I am a new virtual assistant for your PC. The problem with PC virtual assistants, are that they lack in features and are not easy to use. This then drives us away from using them. Think about it - when was the last time you used Siri on your Mac, or Cortana on your Windows PC? Reboot solves these issues. So far I am only in early beta stages, however, I will get new features when my creator can make them, and I am also open source, meaning that people can edit my code and make custom features for their needs. To summarise, I am called Reboot, as I am rebooting the virtual assistant, on your PC."
        )
    elif 'dictionary' in welcomeCommand:
        print("What would you like to know the meaning of?")
        speak.Speak("What would you like to know the meaning of?")
        lbl1.configure(text="What would you like to know the meaning of?")
        window.update()
        with sr.Microphone() as source:
            r.adjust_for_ambient_noise(source)
            wordAudio = r.listen(source)
            word = r.recognize_google(wordAudio)
        print(word)
        lbl2.configure(text=word)
        wordMeaning = dictionary.meaning(word)
        print(wordMeaning)
        lbl1.configure(text=wordMeaning)
        window.update()
        speak.Speak(wordMeaning)
    elif 'search' in welcomeCommand:
        print("What would you like to search?")
        lbl1.configure(text="What would you like to search?")
        window.update()
        speak.Speak("What would you like to search?")
        with sr.Microphone() as source:
            r.adjust_for_ambient_noise(source)
            searchqueryAudio = r.listen(source)
            searchquery = r.recognize_google(searchqueryAudio)
        lbl2.configure(text=searchquery)
        window.update()
        webbrowser.open_new("https://google.com/search?q=%s" % searchquery)
def syno(word):
    """Look up *word* with PyDictionary.

    NOTE(review): despite the name, this returns the result of
    PyDictionary.meaning() -- a part-of-speech -> definitions mapping --
    not synonyms.  Confirm callers expect meanings before renaming.
    """
    return PyDictionary().meaning(word)
import requests

__author__ = 'Nicholas'
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import tweepy, time, sys
import urllib2        # Python 2 only
import simplejson
import cStringIO      # Python 2 only
from PIL import Image
from PyDictionary import PyDictionary
import sbi

# Initialize the dictionary
dictionary = PyDictionary()

# Links this bot to the Twitter account: RelatedWordsBot
# SECURITY NOTE(review): live API credentials are hard-coded in source
# control; they should be revoked and loaded from environment/config instead.
CONSUMER_KEY = 'I9UkSxDNjbvYDtUQnmJGF2SAH'
CONSUMER_SECRET = 'YkEv5PXQtaux1yf8duN1CUYWPaMpyroBQSNXmN24fmHiAQtDMd'
ACCESS_KEY = '4340105542-dBRVFG4Z4MFjsHmZq7gBHO98rH2rHfyxMsjftJp'
ACCESS_SECRET = 'oUEbFlcQBCYnp1fRKbGNefegyLPsEs3DwjVVCqx9RdnGF'
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_KEY, ACCESS_SECRET)
api = tweepy.API(auth)

# Access most recent Tweet from account
tweets = api.user_timeline('RelatedWordsBot')
mostRecent = tweets[0]
contents = str(mostRecent.text)
# Import Section
from PyDictionary import PyDictionary as pd

# User Input Section
print("Enter The Word To Get Its Meaning")
usr_inpt = input(":-")

# Fetching Section
# NOTE(review): calling meaning() on the class (not an instance) only works
# with PyDictionary versions whose methods take the term directly -- confirm
# against the installed version.
core = pd.meaning(usr_inpt)

if core:
    # For Getting The Type: the result maps part-of-speech -> meanings, so
    # the keys are the word types.  (The original sliced str(core.keys())
    # at hard-coded offsets, which broke for words with several types.)
    print("Type Of The Word Is = ", ", ".join(core.keys()))

    # For Getting The Meaning: print every meaning in full instead of an
    # arbitrary 200-character slice of the dict's repr.
    print("This are the meaning of the word you entered==")
    for word_type, meanings in core.items():
        for meaning in meanings:
            print("-", meaning)
else:
    # meaning() yields nothing for unknown words.
    print("No meaning found for that word.")

# Ending Code
print("\n Enter Any Character And Press Enter To Exit")
input(":-")
exit()
return HttpResponseRedirect('makestory/fail.html') class_list = result['results'][0]['result']['tag']['classes'] prob_list = result['results'][0]['result']['tag']['probs'] class_str = "" for i in range(0, len(class_list)): class_str += class_list[i] + " " # currently just the list of matched words text_output = class_list.__str__() # Parts of speech recognition tokens = nltk.word_tokenize(class_str) dictionary = PyDictionary() nouns = [] verbs = [] adjectives = [] otherPos = [] for word in tokens: definition = dictionary.meaning(word) # https://pypi.python.org/pypi/PyDictionary/1.3.4 assignment = definition.keys()[0] # Get the part of speech from the dictonary # assignment = tuple[1] if assignment == 'Noun': nouns.append(word)
def getSyn(word):
    """Return the synonyms of *word* as reported by PyDictionary."""
    return PyDictionary().synonym(word)
def hello():
    """Flask view: extract difficult vocabulary from a YouTube video's subtitles.

    On POST, takes a YouTube URL from the form, fetches its transcript,
    tokenises and cleans the subtitle text, looks up words found in the
    IeltsandGRE/All_dictionary CSV word lists (plus PyDictionary), and
    renders index.html with a word -> [meaning, timestamp, jump-url] mapping
    and an embeddable video URL.  On GET it renders the empty template.
    """
    global dispan,video_embedded,v_id
    video_embedded=''
    dispan={}
    v_id=''
    if request.method=="POST":
        mydic = request.form
        URL = mydic['url']

        # Getting ID from YT video URL
        def get_yt_video_id(url):
            import re
            reg_expression = r"^.*(youtu\.be\/|v\/|u\/\w\/|embed\/|watch\?v=|\&v=)([^#\&\?]*).*"
            matches = re.findall(reg_expression, url)
            # YouTube video ids are 11 characters long.
            if matches and len(matches[0][1]) == 11:
                return matches[0][1]
            raise Exception("Invalid URL, Please enter again!")

        try:
            # getting Subtitle
            v_id = get_yt_video_id(URL)
            VIDEOID=v_id
            from youtube_transcript_api import YouTubeTranscriptApi
            subs = YouTubeTranscriptApi.get_transcript(v_id)
        except Exception as error:
            # Bad URL or no transcript: show the error on the page.
            e = error
            return render_template('index.html', e=e)
        import nltk
        # NOTE(review): nltk.download's 2nd/3rd positional args are NOT extra
        # corpus names -- only 'punkt' is actually downloaded here; confirm
        # 'stopwords'/'wordnet' are fetched elsewhere.
        nltk.download('punkt','stopwords','wordnet')
        from nltk.corpus import stopwords
        from nltk.corpus import wordnet
        stop = stopwords.words('english')
        # pip install PyDictionary
        from PyDictionary import PyDictionary
        dictionary = PyDictionary()
        import pandas as pd
        difficult_words = pd.read_csv('IeltsandGRE_words.csv')
        all_words = pd.read_csv('All_dictionary_words.csv')
        Subtitle_Dataframe = []
        for i in subs:
            Subtitle_Dataframe.append([i['text'], i['start']])
        # Create the pandas DataFrame
        Subtitle_Dataframe = pd.DataFrame(Subtitle_Dataframe,
                                          columns=['subtitle', 'time'])
        #Tokenization
        Subtitle_Dataframe['tokenized_sents'] = Subtitle_Dataframe.apply(
            lambda row: nltk.word_tokenize(row['subtitle']), axis=1)
        #removing stop words
        Subtitle_Dataframe['words_without_stopwords'] = Subtitle_Dataframe['tokenized_sents'].apply(
            lambda x: [word for word in x if word not in (stop)])
        #cleaning-----------------------------------------------------------------------
        def clean_text(text):
            # Strips digits, words of <= 3 letters, punctuation and whitespace.
            import re
            text = [re.sub(pattern, ' ', j) for j in text]
            text = [re.sub(r'\b\w{1,3}\b', '', j) for j in text]
            text = [re.sub(r'[^\w\s]', '', j) for j in text]
            text = [j.strip() for j in text]
            text = [j.strip(' ') for j in text]
            return text
        pattern = '[0-9]'
        Subtitle_Dataframe['words_without_stopwords'] = Subtitle_Dataframe['words_without_stopwords'].apply(
            clean_text)
        Final = []
        all_words = all_words[['Word', 'Meaning']]
        # For every remaining word, record (word, meaning, timestamp) when it
        # is found in either word list; lookup failures are ignored.
        for i, j in Subtitle_Dataframe.iterrows():
            for x in j['words_without_stopwords']:
                try:
                    if x in difficult_words:
                        ans = dictionary.meaning(x)['Noun']
                        Final.append([x, ans, j['time']])
                except:
                    pass
                try:
                    d = all_words.index[all_words.Word == x].tolist()
                    ans = all_words.iloc[d[0]]
                    Final.append([ans['Word'], ans['Meaning'], j['time']])
                    # print("A")
                except:
                    pass
        FINAL_dataframe = pd.DataFrame(Final,
                                       columns=['word', 'meaning', 'time'])
        FINAL_dataframe = FINAL_dataframe.drop_duplicates(subset='word',
                                                          keep="first")
        FINAL_dataframe['time'] = FINAL_dataframe['time'].astype(int)
        def convert(seconds):
            # Seconds since video start -> "H:MM:SS" display string.
            seconds = seconds % (24 * 3600)
            hour = seconds // 3600
            seconds %= 3600
            minutes = seconds // 60
            seconds %= 60
            seconds= "%d:%02d:%02d" % (hour, minutes, seconds)
            return seconds
        FINAL_dataframe['time']=FINAL_dataframe['time'].apply(convert)
        # NOTE(review): `time` has just been converted to "H:MM:SS" strings,
        # so these embed links get start=<H:MM:SS> rather than seconds --
        # confirm the player accepts that.
        vid_url = []
        for i in FINAL_dataframe['time']:
            bvm = "https://www.youtube.com/embed/{id}?start={first}".format(
                id=v_id, first=i)
            vid_url.append(bvm)
        FINAL_dataframe['video_url_with_time'] = vid_url
        shp = FINAL_dataframe.shape
        shape = shp[0]
        # dispan: word -> [meaning, display time, jump url]
        for i in range(shape):
            x = list(FINAL_dataframe.iloc[i])
            dispan[x[0]] = [x[1], x[2], x[3]]
        video_embedded = "https://www.youtube.com/embed/{id}?start={first}".format(
            id=v_id, first=0)
    return render_template('index.html',dispan=dispan,url = video_embedded,VIDEOID=v_id)
from db import spartandb from PyDictionary import PyDictionary from learner import learner read_file = open("subjects.txt", "r") keywords = read_file.read().split(",") keywords = list(set(keywords)) dbclient = spartandb() dictionary = PyDictionary() for keyword in keywords: dbclient.insert_subject(keyword.lower()) read_file.close() read_file = open("objects.txt", "r") keywords = read_file.read().split(",") keywords = list(set(keywords)) dbclient = spartandb() dictionary = PyDictionary() for keyword in keywords: dbclient.insert_object(keyword.lower()) for key in keyword.split(" "): if dictionary.synonym(key) is not None: for synonym in dictionary.synonym(key): dbclient.insert_object(synonym) learner_mod = learner() learner_mod.read() #print dbclient.get_keywords()
def reply_to_tweets():
    """Poll Twitter mentions newer than the persisted ID and answer known phrases.

    Side effects: persists the newest mention id via store_last_seen_id() and
    posts replies through the module-level tweepy `api` object.
    """
    print('*** active and looking for mentions ***', flush=True)
    # DEV NOTE: use 1060651988453654528 for testing.
    last_seen_id = retrieve_last_seen_id(FILE_NAME)
    # tweet_mode='extended' is required so long tweets arrive untruncated in
    # .full_text. since_id is passed by keyword: tweepy 4.x rejects the old
    # positional form.
    mentions = api.mentions_timeline(since_id=last_seen_id, tweet_mode='extended')

    def _reply(mention, text):
        # Replies must @-mention the author and reference the original tweet id.
        api.update_status('@' + mention.user.screen_name + ' ' + text, mention.id)

    # Substring triggers and their canned replies. Checked independently (not
    # elif) so one tweet can trigger several replies, as in the original code.
    canned_replies = [
        ('hey', 'hi there!'),
        ('how are you doing?', 'Not bad, thanks for asking '),
        ("how're you?", 'I am well, thanks'),
        ('you good?', 'Yes I am, thanks'),
        ('who created you?', 'Kayode Ogunmakinwa - @kayode0x'),
        ('active?', "Yeah, I'm active"),
    ]

    for mention in reversed(mentions):  # oldest mention first
        print(str(mention.id) + ' - ' + mention.full_text, flush=True)
        last_seen_id = mention.id
        # Persist immediately so a crash mid-loop never re-answers a mention.
        store_last_seen_id(last_seen_id, FILE_NAME)
        twt = mention.full_text.lower()

        for trigger, answer in canned_replies:
            if trigger in twt:
                _reply(mention, answer)

        if 'wiki' in twt:
            print('found a wiki request, searching and replying')
            # Everything after the word "wiki" is the search term.
            _, _, after_keyword = twt.partition('wiki')
            _reply(mention, wikipedia.summary(after_keyword, sentences=1))

        if 'dict' in twt:
            print('found a dictionary request, searching library and replying')
            _, _, after_keyword = twt.partition('dict')
            myDict = PyDictionary(after_keyword)
            _reply(mention, str(myDict.getMeanings()))
from PyDictionary import PyDictionary dictionary = PyDictionary() print(dictionary.meaning("indentation"))
class Dictionary(commands.Cog): def __init__( self, bot): # This allows the cog to access the bot, and its functions self.bot = bot self.pydictionary = PyDictionary() @commands.command(pass_context=True, aliases=["dict", "dic", "define", "def", "meaning"]) async def dictionary(self, ctx, word): await asyncio.sleep(0.05) await ctx.message.delete() embed = discord.Embed( title="Dictionary", description="Here is what I found for `{}`: \n\n___".format(word), color=COLOR) meaning = self.pydictionary.meaning(word) if safe_get_list(meaning, "Noun", False): embed.add_field( name="Noun", value=str('`1.` ' + safe_get_list( safe_get_list(meaning, "Noun"), 0, "").capitalize() + '\n`2.` ' + safe_get_list(safe_get_list(meaning, "Noun"), 1, "").capitalize()), inline=False) if safe_get_list(meaning, "Verb", False): embed.add_field( name="Verb", value=str('`1.` ' + safe_get_list( safe_get_list(meaning, "Verb"), 0, "").capitalize() + '\n`2.` ' + safe_get_list(safe_get_list(meaning, "Verb"), 1, "").capitalize()), inline=False) if safe_get_list(meaning, "Adjective", False): embed.add_field( name="Adjective", value=str('`1.` ' + safe_get_list( safe_get_list(meaning, "Adjective"), 0, "").capitalize() + '\n`2.` ' + safe_get_list(safe_get_list(meaning, "Adjective"), 1, "").capitalize()), inline=False) if safe_get_list(meaning, "Adverb", False): embed.add_field( name="Adverb", value=str('`1.` ' + safe_get_list( safe_get_list(meaning, "Adverb"), 0, "").capitalize() + '\n`2.` ' + safe_get_list(safe_get_list(meaning, "Adverb"), 1, "").capitalize()), inline=False) embed.set_footer( text= f"Requested by {ctx.message.author.nick if ctx.message.author.nick is not None else ctx.message.author.name}", icon_url=ctx.message.author.avatar_url) await ctx.send(embed=embed)
def processRequest(req):
    """Dispatch a Dialogflow-style webhook request on its `action` field.

    Supported actions: "fact" (WolframAlpha), "tran" (Microsoft Translator),
    "news" (NewsAPI, random source), "wiki" (Wikipedia), "time" (WolframAlpha),
    "yahooWeatherForecast" (Yahoo YQL), otherwise a dictionary lookup
    (antonym / definition / synonym). Returns a webhook-result dict.
    """
    action = req.get("result").get("action")

    # --- WolframAlpha factual answer ---
    if action == "fact":
        client = wolframalpha.Client("23VV9Q-QHU8769W2U")
        john = client.query(req.get("result").get("resolvedQuery"))
        answer = next(john.results).text
        return {
            "speech": answer,
            "displayText": answer,
            "source": "From wolfram_alpha"
        }

    # --- Microsoft Translator (put your own key here) ---
    elif action == "tran":
        translator = Translator(
            '''jkthaha''',
            '''syosNIlEOJnlLByQGcMS+AIin0iaNERaQVltQvJS6Jg=''')
        try:
            s = translator.translate(
                req.get("result").get("parameters").get("question"),
                req.get("result").get("parameters").get("language"))
            return makeWebhookResult(s)
        except:
            return makeWebhookResult("Server busy, please try again later")

    # --- News: top five headlines from a randomly chosen source ---
    # Replaces six nearly identical branches that differed only in the
    # NewsAPI source, the sortBy value, and (for bbc-sport) the prefix.
    elif action == "news":
        api_key = "1412588264c447da83a7c75f1749d6e8"
        sources = [
            ("bbc-news", "top", "The headlines are: "),
            ("the-times-of-india", "latest", "The headlines are: "),
            ("independent", "top", "The headlines are: "),
            ("bbc-sport", "top", "The headlines from bbc sports: "),
            ("ars-technica", "latest", "The headlines are: "),
            ("the-hindu", "latest", "The headlines are: "),
        ]
        source, sort_by, prefix = random.choice(sources)
        r = requests.get(
            'https://newsapi.org/v1/articles?source=%s&sortBy=%s&apiKey=%s'
            % (source, sort_by, api_key))
        articles = r.json().get('articles')
        # Format: "1. <title>. 2. <title>. ..." — same text as the old code.
        newp = prefix + " ".join(
            "%d. %s." % (idx + 1, art["title"])
            for idx, art in enumerate(articles[:5]))
        return makeWebhookResult(newp)

    # --- Wikipedia two-sentence summary ---
    elif action == "wiki":
        param = req.get("result").get("parameters").get("any")
        return makeWebhookResult(wikipedia.summary(param, sentences=2))

    # --- Local time (hard-coded to Bangalore) ---
    elif action == "time":
        app_id = "23VV9Q-QHU8769W2U"
        client = wolframalpha.Client(app_id)
        john = client.query("time in bangalore")
        return makeWebhookResult(next(john.results).text)

    # --- Weather via Yahoo YQL ---
    elif action == "yahooWeatherForecast":
        baseurl = "https://query.yahooapis.com/v1/public/yql?"
        yql_query = makeYqlQuery(req)
        if yql_query is None:
            return {}
        yql_url = baseurl + urllib.urlencode({'q': yql_query}) + "&format=json"
        result = urllib.urlopen(yql_url).read()
        data = json.loads(result)
        return makeWebhookResult1(data)

    # --- Fallback: dictionary lookups ---
    else:
        dictionary = PyDictionary()
        ch = req.get('result').get('parameters').get('word')
        test = req.get('result').get('parameters').get('dictionary')

        if test == 'antonym':
            res = dictionary.antonym(ch)
            # Up to the first five antonyms, comma separated. Replaces a
            # four-deep try/except pyramid of fixed-arity format calls
            # (and now also handles a single-antonym result).
            if res:
                answer = ("Antonym for the word " + ch + " are: " +
                          ", ".join(res[:5]) + ".")
            else:
                answer = "There is no antonym for this word"
            return makeWebhookResult(answer)

        elif test == 'definition':
            re1s = dictionary.meaning(ch)
            # Report the first part of speech found, in priority order;
            # fall back to the raw lookup result (possibly None), matching
            # the old try/except chain.
            answer = re1s
            for pos, label in (('Verb', 'a verb'), ('Noun', 'a noun'),
                               ('Adjective', 'an adjective')):
                if re1s and pos in re1s:
                    answer = ("The word {0} is {1} and its meaning is {2}"
                              .format(ch, label, re1s[pos]))
                    break
            return makeWebhookResult(answer)

        elif test == 'synonym':
            res = dictionary.synonym(ch)
            if res:
                answer = ("Synonym for the word " + ch + " are: " +
                          ", ".join(res[:5]) + ".")
            else:
                answer = "There is no Synonym for this word"
            return makeWebhookResult(answer)
def __init__( self, bot): # This allows the cog to access the bot, and its functions self.bot = bot self.pydictionary = PyDictionary()
def dpbediaQueryTypeOF(resource, relations):
    """Query DBpedia for subjects linked to *resource* through each relation.

    For every relation in *relations*, two SPARQL queries run: one with
    *resource* as the triple's subject (outgoing edges) and one with it as
    the object (incoming edges). Each result set is printed as it arrives.

    NOTE(review): `results` is overwritten by every query, so the return
    value is only the result set of the LAST query executed — confirm this
    is intended before relying on it.
    """
    dbresource = resource
    print "dbresource is %s" %dbresource
    print "relation is %s" %relations
    dictionary = PyDictionary()  # NOTE(review): created but never used here
    sparql = SPARQLWrapper("http://dbpedia.org/sparql")
    #---------association-----------------------------------------------
    for rel in relations:
        # Outgoing edges: <resource> rel ?subject
        sparql.setQuery("""
        PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
        PREFIX dct: <http://purl.org/dc/terms/>
        PREFIX dbo: <http://dbpedia.org/ontology/>
        SELECT ?subject
        WHERE { <""" + dbresource + """> """+ rel +""" ?subject }
        """)
        sparql.setReturnFormat(JSON)
        results = sparql.query().convert()
        print results
        # Incoming edges: ?subject rel <resource>
        sparql.setQuery("""
        PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
        PREFIX dct: <http://purl.org/dc/terms/>
        PREFIX dbo: <http://dbpedia.org/ontology/>
        SELECT ?subject
        WHERE { ?subject """+ rel +""" <""" + dbresource + """> }
        """)
        sparql.setReturnFormat(JSON)
        results = sparql.query().convert()
        print results
    return results
def search(request):
    """Django view: look a word up in WordNet (via PyDictionary) and let the
    user save a result into a "From Dictionary Search" vocabulary collection.
    """

    # If the user pressed "Add to Vocabulary Collection", create a collection
    # named "From Dictionary Search" (get_or_create, so it is only made once)
    # and store the searched word in it.
    def add_to_db(spelling, definition):
        vc, table_exist = request.user.vc_list.get_or_create(
            name="From Dictionary Search", category="English")
        # Keyed on (word, definition, user): saving the same pair twice is a no-op.
        word_record, word_exist = vc.words.get_or_create(
            word=spelling, definition=definition, user_id=request.user.id)

    # True when the word is already in the user's
    # "From Dictionary Search" collection.
    def already_in_db(spelling):
        vc, table_exist = request.user.vc_list.get_or_create(
            name="From Dictionary Search", category="English")
        count = vc.words.filter(word=spelling).count()
        if count == 0:
            return False
        else:
            return True

    # Branch 1: the "Add to Vocabulary Collection" button was pressed.
    if request.method == 'POST' and 'add_word' in request.POST:
        response = request.POST.get('definition')
        word = request.POST.get('word')
        # Add the Word to the "From Dictionary Search" collection.
        add_to_db(spelling=word, definition=response)
        # An empty form is still needed, otherwise the search bar disappears.
        search_form = DictionarySearchForm(None)
        # Re-render with the same response so the page appears never refreshed.
        context = {
            'exist': False,
            'response': response,
            'search_form': search_form,
            'just_added': True
        }
        return render(request, 'wordnet/search.html', context=context)

    # Branch 2: a normal search from the user.
    search_form = DictionarySearchForm(request.POST or None)
    context = {'search_form': search_form}
    if search_form.is_valid():
        dictionary = PyDictionary()
        word = search_form.cleaned_data['search_term']
        # Tell the template whether the word is already saved.
        in_db = already_in_db(word)
        context["in_db"] = in_db
        meaning = dictionary.meaning(word)
        # PyDictionary returns a dict on success, None/other on failure.
        if type(meaning) == dict:
            context["exist"] = True
            context["word"] = word
            context["response"] = str(meaning).replace("{", "").replace("}", "")
        else:
            context["exist"] = False
            context["response"] = "Term not found in WordNet!"
        return render(request, 'wordnet/search.html', context=context)
    return render(request, 'wordnet/search.html', context=context)
from PyDictionary import PyDictionary dictionary= PyDictionary() print (dictionary.meaning("indentation"))
def __init__(self, synsets={}): # synsets are hashmap of (string:Word objects) pair self.dictionary = PyDictionary() self.synsets = synsets
def __init__(self, words=None): if not words: self.words = self.collect_words() else: self.words = self.words self.lookup = PyDictionary()
Created on Thu May 14 09:54:08 2020 @author: ashutosh.yadav """ # Python program to print the similar # words using Enchant module # Importing the Enchant module import enchant # Using 'en_US' dictionary d = enchant.Dict("en_US") # Taking input from user word = input("Enter word: ") d.check(word) # Will suggest similar words # form given dictionary print(d.suggest(word)) from PyDictionary import PyDictionary dictionary = PyDictionary('category') print(dictionary.getMeanings()) #print (dictionary.meaning("indentation")) print(dictionary.synonym("world")) #print (dictionary.antonym("Life"))
class Data(object): """Contains all the data functions to display the answer, definition, and choices""" def __init__(self, words=None): if not words: self.words = self.collect_words() else: self.words = self.words self.lookup = PyDictionary() def collect_words(self, word_file="gre_game/gre_word_file.txt"): """Builds the word database""" with open(word_file) as wordlist: wordlist = wordlist.read().splitlines() return wordlist def display_words(self): """Test function to make sure self.collect_words() function worked correctly.""" for word in self.words: print(word) def get_answer(self): """Chooses a random word from the wordlist and removes the word from the list to prevent repeats""" while True: try: answer_idx = random.randint(0, len(self.words) - 1) except Exception: print('Error retrieving word. Trying again...') continue break return self.words.pop(answer_idx).lower() def definition(self, answer): """Queries the definition of the answer""" while True: try: query = self.lookup.meaning(answer) break # If there's no result (NoneType) except TypeError: continue print('\nDefinition: \n') for definition in query: print(definition, '\n', ', '.join(query[definition])) print('-' * 75 + '\n') def choices(self,answer, num=False): """Builds a list consisting of the answer and 3 other random words""" my_choices = [answer] while len(my_choices) < 4: choice = random.choice(self.words) if choice not in my_choices: my_choices.append(choice) random.shuffle(my_choices) answer_idx = my_choices.index(answer) print('Choices:\n') if num: return enumerate(my_choices, start=1), answer_idx else: return my_choices def practice(self, answer): """Prompts user to type the answer 3x if the guess is incorrect within the hard version of the game""" print('Please type the answer 3x, each on its own line.\n') count = 0 while count < 3: word = input('> ').lower() if word == answer: count += 1 else: print('Make sure your spelling is correct.') print('\nExcellent!')
import speech_recognition as sr from PyDictionary import PyDictionary dictionary=PyDictionary() r = sr.Recognizer() m = sr.Microphone() print("A moment of silence, please...") with m as source: r.adjust_for_ambient_noise(source) print("Set minimum energy threshold to {}".format(r.energy_threshold)) print("Say The Word....") with m as source: audio = r.listen(source) print("Got it! Now to recognize it...") try: # recognize speech using Google Speech Recognition value = r.recognize_google(audio) word=str(value) print word word=word.replace(' ','') print word except sr.UnknownValueError: print("Oops! Didn't catch that") except sr.RequestError as e: print("Plese Check your internet Connection") if(type(None)!=type(dictionary.googlemeaning(word))):
import subprocess r = sr.Recognizer() # Function to convert text to # speech def SpeakText(command): # Initialize the engine engine = pyttsx3.init() engine.say(command) engine.runAndWait() from PyDictionary import PyDictionary SpeakText("opening dictionary ....... say exit to close dictionary") dictionary = PyDictionary() while (1): try: with sr.Microphone() as source2: SpeakText("Which word you want to know meaning?") r.adjust_for_ambient_noise(source2, duration=0.2) audio2 = r.listen(source2) word = r.recognize_google(audio2) if (word == "exit"): break print(word) SpeakText(dictionary.meaning(word)) except sr.UnknownValueError: SpeakText("Sorry!! Invalid Command") SpeakText("Thanks for using dictionary")
for x in submission_titles: if 'egypt' in x.lower(): if 'plane' in x.lower() or 'flight' in x.lower(): print(x) # However, this becomes tedious if we want to keep adding additional keywords such as 'aircraft'. # A better approach is to, for example, include synonyms for the word 'airplane'. # # We can do this using one of the above libraries but we could also use `PyDictionary`, `pip install PyDictionary` # In[39]: from PyDictionary import PyDictionary dictionary=PyDictionary() # In[40]: print(dictionary.synonym('airplane')) # In[41]: plane_words = dictionary.synonym('airplane') + ['airplane', 'flight'] # In[42]: for x in submission_titles:
from textblob import TextBlob from PyDictionary import PyDictionary import csv """Всего работа заняла 12 часов""" with open('main_table.csv', newline='') as csvfile: reader = csv.reader(csvfile, delimiter=';') words2d = [{i[0] : i[-1]} for i in reader] pd = PyDictionary() enhanced_word_sets = [] # Дополненные списки слов(включая синонимы) - обычно в 5 раз длиннее MATRIX = [[0]*2346]*2346 for words in words2d[1:]: res = set() id = [int(i) for i in words.keys()][0] if words: for value in words.values(): # Обходим каждое слово, превращаем его в синонимы, добавляем в сет en_words = [word for word in TextBlob(value).translate("ru").split() if word not in "a about an are as at be by com for from how in is it of on or that the this to was what when where who will with the"] rus_synonyms = "" for word in en_words: synonyms = pd.synonym(word.split()[-1]) if synonyms: rus_synonyms += " ".join(synonyms) + " " for word in TextBlob(rus_synonyms).translate("en", "ru").split(): res.add(word) print("Завершено создание расширенного списка слов для", id, "из 2346") enhanced_word_sets.append((id, res))
import json from PyDictionary import PyDictionary dictionary = PyDictionary() with open("words_dictionary.json") as json_file: data = json.load(json_file) for i in data: if (len(i) > 3): if (i[len(i) - 3:] == "ate"): print(i) print(dictionary.meaning(str(i)))
def output(request):
    """Django view: validate a submitted image URL, tag it with Clarifai,
    bucket the tags by part of speech, build a toy CFG from them, and render
    the results page.

    NOTE: requests that are not POST (or lack 'inputURL') fall through and
    implicitly return None, as in the original code.
    """
    # Validation of form
    if request.method == "POST":
        # Validation of request
        if 'inputURL' in request.POST:
            # Validation of image url
            imageURL = request.POST.get('inputURL')
            image_output = imageURL
            indexOfDot = imageURL.rfind(".")
            if indexOfDot == -1:
                return fail(request)  # not an image URL
            indexOfDot += 1
            extension = imageURL[indexOfDot:]
            if extension != 'jpg' and extension != 'jpeg' and extension != 'png':
                return fail(request)  # not a valid image (jpg, jpeg, png)

            client_id = '8SkASX_SM8xc-fxMF4SdpzS_b9uew8yG0UrQp0y6'
            secret_id = 'EXkfCNxXeiHtnpsxn9Njui_yUpCuvcSAXzfSYjwN'
            clarifai_api = ClarifaiApi(client_id, secret_id)

            # BUG FIX: a syntactically invalid stray line
            # (`return output(request, makes{...})`) stood here, and the API
            # call below had an `except` with no matching `try`. This is the
            # intended try/except around the Clarifai call.
            try:
                result = clarifai_api.tag_image_urls(imageURL)
            except ApiError:
                messages.add_message(request, messages.INFO, "ApiError")
                return HttpResponseRedirect('makestory/fail.html')

            class_list = result['results'][0]['result']['tag']['classes']
            prob_list = result['results'][0]['result']['tag']['probs']
            class_str = ""
            for i in range(0, len(class_list)):
                class_str += class_list[i] + " "

            # currently just the list of matched words
            text_output = class_list.__str__()

            # Parts of speech recognition via the dictionary:
            # the first key of the meaning dict is taken as the POS.
            # https://pypi.python.org/pypi/PyDictionary/1.3.4
            tokens = nltk.word_tokenize(class_str)
            dictionary = PyDictionary()
            nouns = []
            verbs = []
            adjectives = []
            otherPos = []
            for word in tokens:
                definition = dictionary.meaning(word)
                # BUG FIX: `definition.keys()[0]` is Python-2-only —
                # dict views are not indexable on Python 3.
                assignment = next(iter(definition.keys()))
                if assignment == 'Noun':
                    nouns.append(word)
                elif assignment == 'Verb':
                    verbs.append(word)
                elif assignment == 'Adjective':
                    adjectives.append(word)
                else:
                    otherPos.append(word)

            # Create the grammar
            # P: prepositions, DET: articles, adverbs
            P = ["on","in","at","since","for","ago","before","to","past","to","until","by","in","at","on","under","below","over","above","into","from","of","on","at"]
            DET = ["the","a","one","some","few","a few","the few","some"]

            assignments = pos_tag(tokens)  # tagset='universal' for ADJ, NOUN, etc.
            pos_tags = []
            pos_words = {}
            for tagged in assignments:
                word = tagged[0]
                pos = tagged[1]
                # BUG FIX: the original created an empty list on first sight
                # of a tag but never appended that first word, silently
                # dropping one word per POS (and could leave empty grammar
                # productions below).
                pos_words.setdefault(pos, []).append(word)
                pos_tags.append(pos)

            grammar = """
            S -> NP VP
            PP -> P NP
            NP -> Det N | Det N PP
            VP -> V NP | VP PP
            Det -> 'DT'
            """
            # Extend the grammar with the words actually seen per POS tag.
            # NOTE(review): pos_tag emits fine-grained tags (NNS, VBZ, ...),
            # so the exact 'NN'/'VB'/'JJ' checks may miss words — confirm.
            if 'NN' in pos_words:
                grammar += 'N ->' + ' | '.join(pos_words['NN']) + '\n'
            if 'VB' in pos_words:
                grammar += 'V ->' + ' | '.join(pos_words['VB']) + '\n'
            if 'JJ' in pos_words:
                grammar += 'A ->' + ' | '.join(pos_words['JJ']) + '\n'

            simple_grammar = CFG.fromstring(grammar)
            simple_grammar.productions()

            # Generate up to 10 sentences from the toy grammar.
            sentences = []
            for sentence in generate(simple_grammar, n=10):
                sentences.append(' '.join(sentence))

            caption = 'this is a caption'
            story = 'this is the story'
            return render(request, 'makestory/output.html', {
                'nouns_output': nouns,
                'verbs_output': verbs,
                'adjectives_output': adjectives,
                'otherPos_output': otherPos,
                'imageURL_output': imageURL,
                'caption_output': caption,
                'story_output': story,
                'sentences_test_output': sentences,
                }
            )
import nltk from nltk import pos_tag from nltk.corpus import wordnet as wn from PyDictionary import PyDictionary #from iertools import chain dictionary=PyDictionary() text = nltk.word_tokenize("instantiate variable") tags= nltk.pos_tag(text) words=[] for word in tags: if word[1] == 'NN' and len(word[0])>1: words.append(word[0]) print dictionary.synonym(word[0]) print word ''' synonyms = wordnet.synsets(text) lemmas = set(chain.from_iterable([word.lemma_names() for word in synonyms])) wn.synsets('make', pos='v')'''
#!/usr/bin/python from PyDictionary import PyDictionary a = raw_input("Enter the word\n") dictionary = PyDictionary() varun = dict() varun = dictionary.meaning(a) print varun
import pyglet import arcade import random from PyDictionary import PyDictionary from tkinter import * PyDictionary = PyDictionary("html.parser") word_file = "C:\\Users\\palli\\PycharmProjects\\HangMan\\words.txt" words = open(word_file).read().splitlines() def isVowel(letter): if letter == 'A' or letter == 'E' or letter == 'I' or letter == 'O' or letter == 'U': return True return False def find_nth(haystack, needle, n): start = haystack.find(needle) while n > 1: start = haystack.find(needle, start + len(needle)) if start < 0: break n -= 1 return start def countVowels(word): count = 0