class Meaning():
    """Word lookups via PyDictionary, with results logged to meaning.txt.

    task "mn": write the meaning, synonyms and antonyms of `query`.
    task "tr": write the (ASCII-folded) Hindi translation of `query`.
    """

    def __init__(self):
        self.dictionary = PyDictionary()

    def meaning_function(self, query, task="mn"):  # task can be meaning ("mn") or translate ("tr")
        # 'with' guarantees the file is closed even if a PyDictionary lookup raises.
        with open("meaning.txt", "w") as fo:
            if task == "mn":
                fo.write("Meaning :")
                fo.write(str(self.dictionary.meaning(query)))
                fo.write("Synonym :")
                fo.write(str(self.dictionary.synonym(query)))
                fo.write("Antonym :")
                fo.write(str(self.dictionary.antonym(query)))
                print(self.dictionary.meaning(query))
            elif task == "tr":
                fo.write("Translation :")
                # NFKD-normalize and drop non-ASCII characters, then decode the
                # bytes back to str: the original wrote the bytes object straight
                # into a text-mode file, which raises TypeError on Python 3.
                ascii_translation = unicodedata.normalize(
                    'NFKD', self.dictionary.translate(query, 'hi')).encode(
                        'ascii', 'ignore').decode('ascii')
                fo.write(ascii_translation)
                print(self.dictionary.translate(query, 'hi'))

    def __del__(self):
        # Best-effort cleanup: the log file may never have been created, and
        # __del__ must not raise during interpreter shutdown.
        try:
            os.remove("meaning.txt")
        except OSError:
            pass
class Meaning():
    """Dictionary helper that logs lookups to meaning.txt via PyDictionary.

    meaning_function(query, task): task "mn" records meaning/synonyms/antonyms,
    task "tr" records a Hindi translation folded down to ASCII.
    """

    def __init__(self):
        self.dictionary = PyDictionary()

    def meaning_function(self, query, task="mn"):
        # task can be meaning ("mn") or translate ("tr").
        # Context manager closes the file even when a lookup raises mid-way.
        with open("meaning.txt", "w") as fo:
            if task == "mn":
                fo.write("Meaning :")
                fo.write(str(self.dictionary.meaning(query)))
                fo.write("Synonym :")
                fo.write(str(self.dictionary.synonym(query)))
                fo.write("Antonym :")
                fo.write(str(self.dictionary.antonym(query)))
                print(self.dictionary.meaning(query))
            elif task == "tr":
                fo.write("Translation :")
                # Unicode -> ASCII string conversion. The encode() result is
                # bytes; decode it before writing, because writing bytes to a
                # text-mode file raises TypeError on Python 3.
                translated = self.dictionary.translate(query, 'hi')
                folded = unicodedata.normalize('NFKD', translated)
                fo.write(folded.encode('ascii', 'ignore').decode('ascii'))
                print(self.dictionary.translate(query, 'hi'))

    def __del__(self):
        # The file is only created once meaning_function has run, so guard
        # the cleanup; __del__ must never propagate an exception.
        try:
            os.remove("meaning.txt")
        except OSError:
            pass
def index(request):
    """Django view: look up ?word= with PyDictionary and render its meanings,
    synonyms, antonyms and Hindi translation as HTML via index.html.

    The query string value is untrusted input, so it is HTML-escaped before
    being embedded in markup (the original interpolated it raw -> XSS).
    """
    word = request.GET.get("word", "")
    htmlbody = ""
    if word != "":
        import html  # stdlib; local import keeps the module import block untouched
        dictionary = PyDictionary()
        htmlbody = "<h1 style='color:blue'>" + html.escape(word) + ":</h1><br>"
        try:
            meaning = dictionary.meaning(word)
            # meaning maps part-of-speech -> list of definitions.
            for key in meaning:
                htmlbody = htmlbody + "<h3 style='color:red'>" + str(
                    key) + "</h3>"
                for items in meaning[key]:
                    htmlbody = htmlbody + "<h4>" + str(items) + '</h4>'
            htmlbody += "<br>"
            synonyms = dictionary.synonym(word)
            htmlbody += "<b style ='text-size:large; color:red'> Synonyms: </b>"
            for items in synonyms:
                htmlbody += "<b>" + items + "," + "</b>"
            htmlbody += "<br>"
            antonym = dictionary.antonym(word)
            htmlbody += "<b style ='text-size:large; color:red'> Antonyms: </b>"
            for items in antonym:
                htmlbody += "<b>" + items + "," + "</b>"
            htmlbody += "<br><br>" + "<b style ='text-size:large; color:red'> Hindi: </b>"
            try:
                htmlbody += "<b>" + str(dictionary.translate(word, 'hi')) + " </b>"
            except Exception:
                # Translation is best-effort; fall back to a placeholder.
                htmlbody += "<b>No Hindi translations</b>"
        except Exception:
            # PyDictionary returns None (making the loops raise) for unknown
            # words; 'except Exception' no longer swallows KeyboardInterrupt
            # the way the original bare except did.
            htmlbody = htmlbody + "<h3 style='color:red'>" + "Sorry, We were unable to find any meaning for this word" + "</h3>"
    params = {'body': htmlbody}
    return render(request, "index.html", params)
def dictionaryJutsu(statement, mode):
    """Handle dictionary / antonym / translate voice commands.

    mode "dictionary": speak noun and verb meanings of the word after "word".
    mode "antonym":    speak up to two antonyms of the word after "word".
    mode "translate":  speak the translation of "translate <phrase> to <language>".
    """
    dictionary = PyDictionary()
    if mode == "dictionary":
        statement = statement.split("word")[1].strip()
        # Best-effort: a word may have no noun sense (meaning() can return
        # None or lack the key). Narrowed from a bare except, which also hid
        # unrelated bugs such as a NameError inside speak().
        try:
            meaning = dictionary.meaning(statement)['Noun'][0]
            speak(f"The noun meaning is {meaning}")
        except (TypeError, KeyError, IndexError):
            pass
        # Same best-effort lookup for a verb sense.
        try:
            meaning = dictionary.meaning(statement)['Verb'][0]
            speak(f"The verb meaning is {meaning}")
        except (TypeError, KeyError, IndexError):
            pass
    if mode == "antonym":
        statement = statement.split("word")[1]
        list_of_antonyms = dictionary.antonym(statement)
        speak(f"The antonyms of the word are {str(list_of_antonyms[:2])}")
    if mode == "translate":
        statement = statement.split('translate')[1]
        phrase = statement.split('to')[0].strip()
        targetLang = statement.split('to')[1].strip()
        # Supported spoken language names -> Google translate codes.
        languageCodeMapping = {'spanish': 'es', 'arabic': 'ar', 'french': 'fr',
                               'german': 'de', 'hindi': 'hi', 'chinese': 'zh-CN'}
        result = dictionary.translate(phrase, languageCodeMapping[targetLang])
        speak(f"The meaning of the word {phrase} in {targetLang} is {result}")
from PyDictionary import PyDictionary

# Demo: print the Spanish ('es') translation of the word "range".
dictionary = PyDictionary()
result = dictionary.translate("range", 'es')
print(result)
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 15 17:33:44 2019

@author: Vivek
"""
from PyDictionary import PyDictionary

# Print the Hindi ('hi') translation of the name "Vivek".
dictionary = PyDictionary()
print(dictionary.translate("Vivek", 'hi'))
from PyDictionary import PyDictionary
from gtts import gTTS
import os

# Read a word from stdin, print its meaning and synonyms, then save its
# Hindi translation as spoken audio (hello1.mp3).
dictionary = PyDictionary()
wordsb = input()
print(dictionary.meaning(wordsb))
print(dictionary.synonym(wordsb))

result = dictionary.translate(wordsb, 'hi')
tts = gTTS(text=result, lang='hi')
tts.save("hello1.mp3")
from PyDictionary import PyDictionary

# Single-word lookups: meaning, synonym, antonym and a Spanish translation.
dictionary = PyDictionary()
print(dictionary.meaning("indentation"))
print(dictionary.synonym("life"))
print(dictionary.antonym("paper"))
print(dictionary.translate("extreme", 'es'))

# Batch mode: seed the instance with several words at once.
dictionary = PyDictionary("hotel", "ambush", "nonchalant", "perceptive")
'There can be any number of words in the Instance'
print(dictionary.printMeanings())  # prints the meanings of all the words
print(dictionary.getMeanings())  # returns meanings as dictionaries
print(dictionary.getSynonyms())
print(dictionary.translateTo("hi"))  # translates all words to Hindi
# Console "dictionary hub": collects name/DOB/gender, greets the user, then
# prints the Hindi translation of a word they type.
# NOTE(review): `wishme`, `missme`, `speak`, `PyDictionary` and `name` are not
# defined in this excerpt — presumably defined/imported earlier in the file.
# `name2` is read here but `name` is printed below; looks like a bug — TODO confirm.
name2 = input()
print("D.O.B(only year):")
dob = input()
age = 2019 - int(dob)  # hard-coded "current" year 2019
print("AGE: ", age)
print("GENDER: ")
gender = input()
if (gender == "male"):
    wishme()
    print("Hello Mr", name, "sir")
    speak("once again welcome sir")
    print("DICTIONARY HUB")
    speak("sir please type which word you found")
    dictionary = PyDictionary()
    word = input()
    b = dictionary.translate(word, 'hi')  # Hindi translation of the typed word
    print(b)
    speak("thank you for visiting our application,have a nice day sir")
    exit()
elif (gender == "female"):
    missme()
    print("Hello Mrs", name, "madam")
    speak("once again welcome mam")
    print("DICTIONARY HUB")
    speak("madam please type which word you found")
    word = input()
    dictionary = PyDictionary()
    c = dictionary.translate(word, 'hi')
    print(c)
    speak("thank you for visiting our application,have a nice day madam")
    exit()
# Any other gender input falls through silently with no greeting or lookup.
def rex_start():
    """Voice assistant: listen once on the microphone, then dispatch the
    recognized phrase (YouTube, joke, weather, time, dictionary) or fall
    back to opening a Google search for it.

    NOTE(review): relies on module-level imports not shown in this excerpt —
    sr (speech_recognition), pyttsx3, wb (webbrowser), requests, datetime,
    PyDictionary. Indentation reconstructed from a collapsed source; the
    exact nesting of some statements should be confirmed against the original.
    """
    r = sr.Recognizer()
    engine1 = pyttsx3.init()
    engine1.setProperty('rate', 150)  # speech rate (words per minute)
    engine1.say("Hello, I am Rex the voice assistant.")
    engine1.runAndWait()
    with sr.Microphone() as source:
        print('Say Something!')
        engine1.say("Say Something that i can do for you")
        engine1.runAndWait()
        audio = r.listen(source)
        print('Done!')
    try:
        # recognize_google may raise (unintelligible audio / network error);
        # the broad handler at the bottom just prints the failure.
        text = r.recognize_google(audio)
        print('You said:\n' + text)
        lang = 'en'
        engine1 = pyttsx3.init()
        engine1.setProperty('rate', 150)
        engine1.say("You Searched " + text)
        engine1.runAndWait()
        if text == 'YouTube':
            wb.open('https://www.youtube.com')
        elif text == 'tell a joke':
            z = """A man asks a farmer near a field, Sorry sir, would you mind if I crossed your field instead of going around it? You see, I have to catch the 4:23 train.\n\
The farmer says, Sure, go right ahead. And if my bull sees you, you’ll even catch the 4:11 one."""
            engine1.say(z)
            print(z)
            engine1.runAndWait()
        elif text == 'tell me a weather':
            # Hard-coded OpenWeatherMap request for Sahiwal (API key embedded in the URL).
            api_address = 'http://api.openweathermap.org/data/2.5/weather?appid=c3b39dac5374cedd511992b18e7fb675&q=Sahiwal'
            # city = input('City Name :')
            # url = api_address + city
            json_data = requests.get(api_address).json()
            format_add = json_data['weather'][0]['description']
            engine1.say(format_add)
            print(format_add)
            engine1.runAndWait()
        elif text == 'tell me a time':
            z = datetime.datetime.now()
            z1 = z.strftime("%I hours %M minutes %S seconds %p")  # 12-hour time
            z2 = z.strftime("%A, %B %d, %Y")  # weekday, month day, year
            engine1.say(z2)
            engine1.say(z1)
            print(z1)
            print(z2)
            engine1.runAndWait()
        elif text == 'dictionary':
            # Listen for a second word, then read out its meaning and an
            # Urdu ('ur-PK') translation.
            dictionary = PyDictionary()
            engine1.say("say a word")
            engine1.runAndWait()
            with sr.Microphone() as source1:
                audio1 = r.listen(source1)
            text1 = r.recognize_google(audio1)
            lang = 'en'
            print(text1)
            engine1.say("You Said " + text1)
            engine1.say("Meaning")
            print("Meanings:")
            print(dictionary.meaning(text1))
            print(dictionary.translate(text1, 'ur-PK'))
            engine1.say(dictionary.meaning(text1))
            engine1.say(dictionary.translate(text1, 'ur-PK'))
            engine1.runAndWait()
        else:
            # Unknown phrase: open a Google search for it.
            f_text = 'https://www.google.com.pk/search?hl=en&source=hp&ei=x80UXKqXMK6QmgXI8oDwAg&q=' + text
            wb.open(f_text)
    except Exception as e:
        print(e)
from PyDictionary import PyDictionary

# Look up and print the Spanish ('es') translation of "Range".
dictionary = PyDictionary()
translation = dictionary.translate("Range", 'es')
print(translation)
# from googletrans import Translator
#
# translator = Translator()
#
# print(translator.translate(text='thinking', src='en', dest='ru').pronunciation)
from PyDictionary import PyDictionary

# PyDictionary-based alternative to the googletrans snippet above:
# print the Russian ('ru') translation of "perhaps".
dictionary = PyDictionary()
print(dictionary.translate("perhaps", 'ru'))
'''Get definitions for a word'''
# Do a "pip install PyDictionary" from command line first.
from PyDictionary import PyDictionary

DICT = PyDictionary()
CHECK_WORD = "house"

# Meaning, synonyms and antonyms, separated by rules, then a French translation.
print(DICT.meaning(CHECK_WORD))
print("-----------")
print(DICT.synonym(CHECK_WORD))
print("-----------")
print(DICT.antonym(CHECK_WORD))
print("-----------")
print("French translation:")
print(DICT.translate(CHECK_WORD, 'fr'))
# Note ignore the warnings, this wouldn't show up in a GUI,
# NOTE(review): Python 2 code (print statement, raw_input) — left as-is.
def dictionary():
    """Prompt for a word, print all its meanings, then its Hindi translation.

    NOTE(review): the local variable `dictionary` shadows this function's own
    name, so the function cannot be called again through that name afterwards
    within the same scope chain — confirm intent.
    """
    message_dict = raw_input("Enter Word : ")
    dictionary = PyDictionary(message_dict)
    print(dictionary.printMeanings())
    print "Translation To Hindi : {0}".format(
        (dictionary.translate(message_dict, 'hi')))
async def on_message(message):
    """Discord bot command dispatcher (legacy discord.py async API).

    Commands are prefixed with "//". `error` + `tempMessage` accumulate a
    failure notice and an auto-delete delay that are handled at the bottom.
    NOTE(review): reconstructed from collapsed source — indentation of a few
    statements (e.g. around loops) is inferred and should be confirmed.
    Relies on module-level names not shown here: client, AI, helpText,
    version, morseCommand, colours, wikipedia, discord, random, asyncio,
    PyDictionary, stored, number.
    """
    tempMessage = 0
    error = ""
    if message.content.startswith("//"):
        await client.send_typing(message.channel)
        message = message  # no-op self-assignment (kept as-is)
        await client.delete_message(message)  # commands are removed from the channel
        if message.author.top_role.name == "No Bot":
            error = "You have been banned from using the bot."
            tempMessage = 5
            msg = await client.send_message(message.channel, ":no_entry_sign: " + message.author.mention + ", " + error)
            await asyncio.sleep(tempMessage)
            await client.delete_message(msg)
            return
        elif message.content.startswith("//editme"):
            msg = await client.send_message(message.author, "10")
            await asyncio.sleep(3)
            await client.edit_message(msg, "40")
    # we do not want the bot to reply to itself
    elif message.author == client.user:
        return
    if message.content.startswith("//hello"):
        msg = "Hello " + message.author.mention
        await client.send_message(message.channel, msg, tts=True)
    elif message.content.startswith("//ask"):
        if message.author.top_role.name == "Admin":
            adminState = True
        else:
            adminState = False
        if "?" not in message.content:
            await client.send_message(message.channel, "**Question: **" + str((message.content[6:] + "?").capitalize()) + " **Response:** " + AI.answer(message.content[6:], adminState))
        else:
            await client.send_message(message.channel, "**Question: **" + str((message.content[6:]).capitalize()) + " **Response:** " + AI.answer(message.content[6:], adminState))
    #elif message.content.startswith("//chat"):
    #    await client.send_message(message.channel, message.author.mention + ": " + str((message.content[7:]).capitalize()) + "\n" +
    #                              str(ChatBot.talk(message.content[7:])).capitalize())
    elif message.content.startswith("//petition"):
        # Thumbs-up/-down voting; `votes` thumbs either way decides the outcome.
        votes = 7
        if len(message.content[11:]) != 0:
            msg = await client.send_message(message.channel, ":ballot_box_with_check: Petition: **" + message.content[11:] + "** - created by: " + message.author.mention + ". React with thumbs on this message to vote @here! (" + str(votes) + " votes to pass or fail)")
            await client.add_reaction(msg, "\U0001F44D")
            await client.add_reaction(msg, "\U0001F44E")
            count = [0, 0]  # [upvotes,downvotes]
            alreadyVoted = [[], []]  # users who upvoted / downvoted (one vote each)
            upvotedDisplay = await client.send_message(message.channel, "**0** \U0001F44D")
            downvotedDisplay = await client.send_message(message.channel, "**0** \U0001F44E")
            await asyncio.sleep(1)
            while count[0] <= votes - 1 and count[1] <= votes - 1:
                res = await client.wait_for_reaction(message=msg)
                if res.user != client.user:
                    await client.remove_reaction(msg, res.reaction.emoji, res.user)
                if (res.user not in alreadyVoted[0]) and (res.user not in alreadyVoted[1]) and (res.user != client.user):
                    if res.reaction.emoji == "\U0001F44D":
                        count[0] += 1
                        alreadyVoted[0].append(res.user)
                    if res.reaction.emoji == "\U0001F44E":
                        count[1] += 1
                        alreadyVoted[1].append(res.user)
                    displayVotedUp = ', '.join(str(votee) for votee in alreadyVoted[0])
                    await client.edit_message(upvotedDisplay, "**" + str(len(alreadyVoted[0])) + "** \U0001F44D " + displayVotedUp)
                    displayVotedDown = ', '.join(str(votee) for votee in alreadyVoted[1])
                    await client.edit_message(downvotedDisplay, "**" + str(len(alreadyVoted[1])) + "** \U0001F44E " + displayVotedDown)
            if count[0] >= votes:
                await client.send_message(message.channel, ":ballot_box_with_check: Voting done! - sent to an admin for review.")
                adminChannel = client.get_channel("256589191444430848")
                await client.send_message(adminChannel, ":ballot_box_with_check: Petition created by: " + message.author.mention + " passed voting! - **" + message.content[11:] + "**")
            else:
                await client.send_message(message.channel, ":x: Voting failed!")
        else:
            msg = await client.send_message(message.channel, "Please enter something into the petition.")
            await asyncio.sleep(5)
            await client.delete_message(msg)
    elif message.content.startswith("//vote"):
        #http://discordpy.readthedocs.io/en/latest/api.html#discord.Client.get_reaction_users
        voteEmoticion = "\u2714"
        unvoteEmoticon = "\u2716"
        if len(message.content[7:]) != 0:
            msg = await client.send_message(message.channel, ":ballot_box_with_check: Vote: **" + message.content[7:] + "** - created by: " + message.author.mention + ". @here, Click vote reactions to vote!")
            await asyncio.sleep(0.5)
            await client.add_reaction(msg, voteEmoticion)
            await client.add_reaction(msg, unvoteEmoticon)
        else:
            msg = await client.send_message(message.channel, ":no_entry_sign: " + message.author.mention + ", Please enter something into the vote.")
            await asyncio.sleep(5)
            await client.delete_message(msg)
    elif message.content.startswith("//deleteme"):
        tempMessage = 0.1
        msg = await client.send_message(message.channel, "You saw nothin.")
    elif message.content.startswith("//roll"):
        limit = int(message.content[6:])
        result = str(random.randint(1, limit))
        msg = await client.send_message(message.channel, result)
        tempMessage = 5
    elif message.content.startswith("//help detail"):
        await client.send_message(message.channel, helpText.help() + version + "\n" + message.author.mention)
    elif message.content.startswith("//help"):
        await client.send_message(message.channel, helpText.helpSimple() + version + "\n" + message.author.mention)
    elif message.content.startswith("//choose"):
        if len(message.content[9:]) == 0:
            error = "Invalid format! //choose <option 1 2 3...>"
            tempMessage = 5
            return
        else:
            given = message.content[9:]
            choices = given.split(", ")
            msg = await client.send_message(message.channel, "Options:" + message.content[8:] + ", I have chosen: **" + random.choice(choices) + "**")
    # NOTE(review): ("//salt" or "//salty") evaluates to just "//salt",
    # so "//salty" is matched only via the common prefix.
    elif message.content.startswith("//salt" or "//salty"):
        msg = await client.send_file(message.channel, "salt.png")
        tempMessage = 30
    elif message.content.startswith("//wiki"):
        try:
            page = wikipedia.page(message.content[7:])
            emb = discord.Embed()
            emb.title = ":newspaper: " + str(page.title)
            emb.description = str(wikipedia.summary(message.content[7:]))[:500] + "..." + " \n*Read more here: " + str(page.url) + "*"
            await client.send_message(message.channel, embed=emb)
        except wikipedia.exceptions.DisambiguationError as exception:
            emb = discord.Embed()
            emb.title = "Couldn't find any Wikipedia pages, try these:"
            emb.description = ", ".join(exception.options)
            msg = await client.send_message(message.channel, embed=emb)
            tempMessage = 30
    elif message.content.startswith("//define"):
        dictionary = PyDictionary()
        if dictionary.meaning(message.content[9:]) == None:
            error = "Sorry I can't seem to find a definition for that."
            tempMessage = 5
        else:
            emb = discord.Embed()
            emb.title = str(":abc: Definition: " + message.content[9:])
            emb.description = str(dictionary.meaning(message.content[9:]))
            await client.send_message(message.channel, embed=emb)
    elif message.content.startswith("//antonym"):
        dictionary = PyDictionary()
        if dictionary.meaning(message.content[9:]) == None:
            error = "Sorry I can't seem to find an antonym for that."
            tempMessage = 5
        else:
            emb = discord.Embed()
            emb.title = str(":ab: Antonym: " + message.content[10:])
            emb.description = ", ".join(dictionary.antonym(message.content[10:]))
            await client.send_message(message.channel, embed=emb)
    elif message.content.startswith("//synonym"):
        dictionary = PyDictionary()
        if dictionary.meaning(message.content[9:]) == None:
            error = "Sorry I can't seem to find an synonym for that."
            tempMessage = 5
        else:
            emb = discord.Embed()
            emb.title = str(":abcd: Synonym: " + message.content[10:])
            emb.description = ", ".join(dictionary.synonym(message.content[10:]))
            await client.send_message(message.channel, embed=emb)
    elif message.content.startswith("//french"):
        dictionary = PyDictionary()
        if dictionary.meaning(message.content[9:]) == None:
            error = "Sorry I can't seem to translate that. (one word please)"
            tempMessage = 5
        else:
            emb = discord.Embed()
            emb.title = str(":symbols: Engligh: " + message.content[9:])
            emb.description = "**French:** " + str(dictionary.translate(message.content[9:], 'fr'))
            await client.send_message(message.channel, embed=emb)
    # NOTE(review): same startswith("//pepper" or ...) quirk as //salt above.
    elif message.content.startswith("//pepper" or "//peppery"):
        msg = await client.send_file(message.channel, "pepper.png")
        tempMessage = 30
    elif message.content.startswith("//spam"):
        #right click spam channel for id
        arguments = message.content.split()
        conString = ""
        if message.channel.name == "spam":  #or message.channel.id == "id"
            try:
                int(arguments[1])
            except:
                error = "Invalid format! //spam <amount> <message...>"
                tempMessage = 5
                return
            for word in arguments[2:]:
                conString = conString + " " + word
            if int(arguments[1]) <= 10:
                for x in range(int(arguments[1])):
                    await client.send_message(message.channel, conString)
            elif int(arguments[1]) > 10:
                error = "I can't spam that much!"
                tempMessage = 5
        else:
            error = "You may only use this in the spam text channel."
            tempMessage = 5
    elif message.content.startswith("//morse"):
        decoded = (str(message.content[8:]))
        decoded = decoded.lower()
        msg = await client.send_message(message.channel, "```" + morseCommand.encode(decoded) + "```" + message.author.mention)
    elif message.content.startswith("//demorse"):
        encoded = (str(message.content[10:]) + " ")
        msg = await client.send_message(message.channel, "```" + morseCommand.decode(encoded) + "```" + message.author.mention)
    elif message.content.startswith("//flip" or "//flipr"):
        if message.content.startswith("//flip "):
            unflip = (str(message.content[7:]))[::-1]
        else:
            unflip = (str(message.content[8:]))
        unflip = unflip.lower()
        msg = await client.send_message(message.channel, morseCommand.flip(unflip))
    elif message.content.startswith('//repeat'):
        # `stored` is a module-level list of [mention, saved_text] pairs; the
        # broad except reports "Not initialized!" when it doesn't exist yet.
        try:
            if message.content.startswith('//repeat init'):
                if message.author.top_role.name == 'Admin':
                    global stored
                    stored = []
                    msg = await client.send_message(message.channel, '```Initialized!```' + message.author.mention)
                    tempMessage = 3
                else:
                    msg = await client.send_message(message.channel, '```Insufficient permissions!```' + message.author.mention)
                    tempMessage = 3
            elif message.content.startswith('//repeat save'):
                found = False
                if len(stored) > 0:
                    for mes in range(len(stored)):
                        if stored[mes][0] == message.author.mention:
                            stored[mes][1] = message.content[14:]
                            msg = await client.send_message(message.channel, '```Message recorded!```' + message.author.mention)
                            tempMessage = 3
                            found = True
                    if found == False:
                        stored.append([message.author.mention, message.content[14:]])
                        msg = await client.send_message(message.channel, '```Message recorded!```' + message.author.mention)
                        tempMessage = 3
                        found = True
                else:
                    stored.append([message.author.mention, message.content[14:]])
                    msg = await client.send_message(message.channel, '```Message recorded!```' + message.author.mention)
                    tempMessage = 3
                    found = True
            elif message.content.startswith('//repeat call'):
                found = False
                for mes in range(len(stored)):
                    if stored[mes][0] == message.author.mention:
                        if len(stored[mes][1]) == 0:
                            msg = await client.send_message(message.channel, '``` ```**Message from: **' + message.author.mention)
                        else:
                            msg = await client.send_message(message.channel, '```' + stored[mes][1] + '```**Message from: **' + message.author.mention)
                        found = True
                if found == False:
                    msg = await client.send_message(message.channel, '```No messages found!```' + message.author.mention)
                    tempMessage = 3
                    found = True
            else:
                msg = await client.send_message(message.channel, '```Invalid subcommand!```' + message.author.mention)
                tempMessage = 3
        except:
            msg = await client.send_message(message.channel, '```Not initialized!```' + message.author.mention)
            tempMessage = 3
    elif message.content.startswith("//anon"):
        mes = message.content[7:]
        msg = await client.send_message(message.channel, mes + "\n**~Anonymous :speech_balloon:**")
    elif message.content.startswith("//reverse"):
        mes = str(message.content[:9:-1])  # reversed text after the command prefix
        msg = await client.send_message(message.channel, "```" + mes + "```")
    elif message.content.startswith("//colour list"):
        msg = await client.send_message(message.channel, message.author.mention + " colours: " + ", ".join(colours))
        tempMessage = 10
    elif message.content.startswith("//colour"):
        # Remove every existing colour role before assigning the requested one.
        for colour in colours:
            await asyncio.sleep(0.1)
            removeRole = discord.utils.find(lambda d: d.name == "colour-" + colour, message.channel.server.roles)  #thanks Khang, lambda master
            await client.remove_roles(message.author, removeRole)
        await asyncio.sleep(0.5)
        userColour = message.content[9:]
        colourRole = discord.utils.find(lambda r: r.name == "colour-" + userColour, message.channel.server.roles)
        await client.add_roles(message.author, colourRole)
    elif message.content.startswith("//hexcolour"):
        hx = str(message.content[12:18])
        hx = hx.upper()
        digs = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "A", "B", "C", "D", "E", "F"]
        valid = True
        for dig in hx:
            if dig not in digs:
                valid = False
        while len(hx) < 6:
            hx = "0" + hx  # left-pad to a full 6-digit hex code
        if valid == False:
            msg = await client.send_message(message.channel, "```Invalid colour!```" + message.author.mention)
            await asyncio.sleep(3)
            await client.delete_message(msg)
        else:
            hxint = int("0x" + hx, 16)
            newrole = discord.utils.get(message.server.roles, name="#" + hx)
            if newrole not in message.server.roles:
                await asyncio.sleep(0.2)
                await client.create_role(server=message.server, name="#" + hx, colour=discord.Colour(hxint), mentionable=False, hoist=False)
            # Strip the author's old "#xxxxxx" roles; delete ones no longer used.
            for rol in message.author.roles:
                if str(rol)[0] == "#":
                    rolefound = 0
                    for mem in message.server.members:
                        if rol in mem.roles:
                            rolefound += 1
                    if rolefound <= 1:
                        await asyncio.sleep(0.2)
                        await client.delete_role(server=message.server, role=rol)
                    await asyncio.sleep(0.2)
                    await client.remove_roles(message.author, rol)
            for rol in message.server.roles:
                if str(rol) == "#" + hx:
                    await asyncio.sleep(0.2)
                    await client.add_roles(message.author, rol)
    elif message.content.startswith("//notify on"):
        try:
            topic = message.content[12:]
            topicRole = discord.utils.find(lambda r: r.name == "notify-" + topic, message.channel.server.roles)
            await client.add_roles(message.author, topicRole)
            msg = await client.send_message(message.channel, ":bell: " + message.author.mention + ", you have been opted in to receive notifications for " + topic)
            tempMessage = 5
        except:
            error = "Something went wrong."
            tempMessage = 5
    elif message.content.startswith("//notify off"):
        try:
            topic = message.content[13:]
            topicRole = discord.utils.find(lambda r: r.name == "notify-" + topic, message.channel.server.roles)
            await client.remove_roles(message.author, topicRole)
            msg = await client.send_message(message.channel, ":no_bell: " + message.author.mention + ", you have been opted out of receiving receive notifications for " + topic)
            tempMessage = 5
        except:
            error = "Something went wrong."
            tempMessage = 5
    elif message.content.startswith("//notify list"):
        msg = await client.send_message(message.channel, ":bellhop: " + message.author.mention + " //notify <on/off> <channel> - Channel Options: rocket league, minecraft, announcements")
        tempMessage = 10
    #await client.send_message(message.author, "10")
    #await client.send_message(message.channel, "WOW!", tts=True)  # tts will read the message
    #await client.change_presence(status=discord.Status.dnd)  #fix -> changes status
    #await client.add_roles(message.author, *roles)
    #server = message.author.server
    #member = discord.utils.find(lambda r: r.name == "colour1", server.roles)
    #print(member.id)
    #message.channel.id
    #await client.add_roles(message.author, "colour1")
    #print(member)
    #elif message.content.startswith("//suggest"):
    elif message.content.startswith("//invite"):
        msg = await client.send_message(message.channel, "https://discord.gg/M9hBtqn")
        tempMessage = 15
    elif message.content.startswith("//suggest emoji"):
        adminChannel = client.get_channel("256589191444430848")
        await client.send_message(adminChannel, ":upside_down: Emoji suggestion created by: " + message.author.mention + " - Emoji:" + message.content[15:])
        msg = await client.send_message(message.channel, ":upside_down:" + message.author.mention + "Sent to an admin as an emoji suggestion. Inappropriate suggestions may lead to consequences.")
        tempMessage = 10
    elif message.content.startswith("//suggest splash"):
        adminChannel = client.get_channel("256589191444430848")
        await client.send_message(adminChannel, ":sweat_drops: Splash suggestion created by: " + message.author.mention + " - **Playing**" + message.content[16:])
        msg = await client.send_message(message.channel, ":sweat_drops: " + message.author.mention + ", Sent to an admin as a splash suggestion. Inappropriate suggestions may lead to consequences. Preview: **Playing**" + message.content[16:])
        tempMessage = 10
    elif message.content.startswith("//suggest"):
        adminChannel = client.get_channel("256589191444430848")
        await client.send_message(adminChannel, ":clipboard: Suggestion created by: " + message.author.mention + " - " + message.content[10:])
        msg = await client.send_message(message.channel, ":clipboard: " + message.author.mention + ", Sent to an admin as an overall suggestion. Inappropriate suggestions may lead to consequences.")
        tempMessage = 10
    elif message.content.startswith("//prime"):
        # Find the next prime >= the given number by trial division, then
        # display both with apostrophe digit-grouping.
        s = int(message.content[8:])
        if s <= 19999999999997:
            p = int(message.content[8:]) - 1
            l = []
            if p == 1:
                p -= 1
            if p % 2 == 0.0:
                p -= 1
            c = 0
            while len(l) <= 1:
                p += 2
                d = 0
                c = 0
                while d < p ** (1/2) and c <= 1:
                    d += 1
                    if (p / d) % 1 == 0.0:
                        c += 1
                if c == 1:
                    l.extend([p])
            if l[0] == 1:
                l[0] = 2
            orig = ""
            final = ""
            s = str(s)
            f = str(l[0])
            for i in range(len(s)):
                if len(orig) % 4 == 0:
                    orig += "\'"
                orig += s[-i - 1]
            for i in range(len(f)):
                if len(final) % 4 == 0:
                    final += "\'"
                final += f[-i - 1]
            msg = await client.send_message(message.channel, "```" + orig[-1:0:-1] + " -> " + final[-1:0:-1] + "```" + message.author.mention)
        else:
            error = "Your number is too high!"
            tempMessage = 5
    elif message.content.startswith("//rules"):#############################
        error = "Sorry this command is in the works! Type //help instead!"
        tempMessage = 10
    elif message.content.startswith("//remind"):
        if message.author.top_role.name == "Admin":
            # Split "<seconds> <text>" at the first space, sleep, then remind.
            remall = message.content[9:]
            remtr = ""
            rem = ""
            spc = False
            for i in range(len(remall)):
                if remall[i] == " " and spc == False:
                    spc = True
                if spc == False:
                    remtr = remtr + remall[i]
                if spc == True:
                    rem = rem + remall[i]
            remtr = int(remtr) - 1
            await asyncio.sleep(remtr)
            await client.send_typing(message.channel)
            msg = await client.send_message(message.channel, "```http\nReminder:" + rem + "```@everyone")
        else:
            error = "You do not have permission to use this command."
            tempMessage = 5
    elif message.content.startswith("//timer"):
        tr = int(message.content[8:])
        timermsg = await client.send_message(message.channel, ":alarm_clock: Timer:" + str(tr) + " seconds.")
        count = 5  # refresh the countdown display every 5 ticks
        while tr >= 1:
            if count == 5:
                await client.edit_message(timermsg, ":alarm_clock: Timer: " + str(tr) + " seconds.")
                count = 0
            await asyncio.sleep(1)
            tr -= 1
            count += 1
        await client.delete_message(timermsg)
        await client.send_typing(message.channel)
        msg = await client.send_message(message.channel, "```Time is up!```" + message.author.mention)
        tempMessage = 5
    elif message.content.startswith("//delete"):
        try:
            if int(message.content[9:]) > 10:
                msg = await client.send_message(message.channel, ":wastebasket: Slow down there!")
                await asyncio.sleep(5)
                await client.delete_message(msg)
            elif message.author.top_role.name == "Admin":
                await client.purge_from(message.channel, limit=int(message.content[9:]))
                msg = await client.send_message(message.channel, ":wastebasket: Deleted " + message.content[9:] + " message(s).")
                await asyncio.sleep(5)
                await client.delete_message(msg)
            else:
                error = "You do not have permission to use this command."
                tempMessage = 5
        except:
            error = "Not a valid number."
            tempMessage = 5
    elif message.content.startswith("//room"):
        # Per-user private voice channel, gated by a matching "Private <user>" role.
        if message.content.startswith("//room create"):
            pchname = "Private " + str(message.author)
            pch = discord.utils.get(message.server.channels, name=pchname, type=discord.ChannelType.voice)
            if pch not in message.server.channels:
                pcheveryoneperms = discord.PermissionOverwrite(connect=False)
                pchroleperms = discord.PermissionOverwrite(connect=True)
                pchrole = await client.create_role(server=message.server, name=pchname, mentionable=True, hoist=False)
                await client.add_roles(message.author, pchrole)
                await client.create_channel(message.server, pchname, (message.server.default_role, pcheveryoneperms), (pchrole, pchroleperms), type=discord.ChannelType.voice)
                msg = "Private channel created!\n" + str(message.author.mention)
                tempMessage = 3
            else:
                error = "You already have a private channel!\n" + str(message.author.mention)
                tempMessage = 3
        elif message.content.startswith("//room delete"):
            pchname = "Private " + str(message.author)
            pch = discord.utils.get(message.server.channels, name=pchname, type=discord.ChannelType.voice)
            pchrole = discord.utils.get(message.server.roles, name=pchname)
            if pch in message.server.channels:
                await client.delete_channel(pch)
                await client.delete_role(message.server, pchrole)
                msg = "Private channel deleted!\n" + str(message.author.mention)
                tempMessage = 3
            else:
                error = "No private channel exists to be deleted!\n" + str(message.author.mention)
                tempMessage = 3
        elif message.content.startswith("//room invite all"):
            pchname = "Private " + str(message.author)
            pchrole = discord.utils.get(message.server.roles, name=pchname)
            msg = "Inviting all members to " + pchname + "..."
            for mem in message.server.members:
                if (pchrole not in mem.roles) and (mem != message.author):
                    await client.add_roles(mem, pchrole)
            tempMessage = 0
            msg = "Invited all members to " + pchname + "!"
            tempMessage = 5
        elif message.content.startswith("//room uninvite all"):
            pchname = "Private " + str(message.author)
            pchrole = discord.utils.get(message.server.roles, name=pchname)
            msg = "Uninviting all members to " + pchname + "..."
            for mem in message.server.members:
                if (pchrole in mem.roles) and (mem != message.author):
                    await client.remove_roles(mem, pchrole)
            tempMessage = 0
            msg = "Unnvited all members to " + pchname + "!"
            tempMessage = 5
        elif message.content.startswith("//room invite"):
            await client.send_typing(message.channel)
            pchname = "Private " + str(message.author)
            pchrole = discord.utils.get(message.server.roles, name=pchname)
            if len(message.mentions) == 0:
                error = "Cannot invite 0 people!\n" + message.author.mention
                tempMessage = 3
            for newmember in message.mentions:
                if (newmember in message.server.members) and (pchrole not in newmember.roles):
                    await client.add_roles(newmember, pchrole)
                    msg = "You have been invited to " + pchname + "!\n" + newmember.mention
                    tempMessage = 7
                elif (newmember in message.server.members) and (pchrole in newmember.roles):
                    error = str(newmember) + " is already invited to " + pchname + "!"
                    tempMessage = 5
                else:
                    error = "Member not found!\n" + message.author.mention
                    tempMessage = 3
        elif message.content.startswith("//room uninvite"):
            pchname = "Private " + str(message.author)
            pch = discord.utils.get(message.server.channels, name=pchname, type=discord.ChannelType.voice)
            pchrole = discord.utils.get(message.server.roles, name=pchname)
            if len(message.mentions) == 0:
                error = "Cannot uninvite 0 people!\n" + message.author.mention
                tempMessage = 3
            for newmember in message.mentions:
                if (newmember in message.server.members) and (pchrole in newmember.roles):
                    if newmember == message.author:
                        await client.send_typing(message.channel)
                        error = "Cannot uninvite yourself!\n" + message.author.mention
                        tempMessage = 3
                    else:
                        await client.remove_roles(newmember, pchrole)
                        msg = "You have been uninvited to " + pchname + "!\n" + newmember.mention
                        tempMessage = 7
                        # Kick the uninvited member out of the room if connected.
                        if newmember.voice_channel.id == pch.id:
                            await client.move_member(newmember, message.server.afk_channel)
        else:
            error = "Invalid subcommand!\n" + message.author.mention
            tempMessage = 3
    elif message.content.startswith("//numbergame"):
        # `number` is module-level game state; guesses are handled at the bottom.
        global number
        number = -1
        beforetr = 10
        tr = 5
        timermsg = await client.send_message(message.channel, ":1234: @here Number game starting in " + str(beforetr) + " seconds.")
        await asyncio.sleep(beforetr - tr)
        while tr >= 1:
            await client.edit_message(timermsg, ":1234: @here Number game starting in " + str(tr) + " seconds.")
            await asyncio.sleep(1)
            tr -= 1
        await client.delete_message(timermsg)
        number = random.randint(1, 100)
        ingame = message.author
        msg = await client.send_message(message.channel, ":1234: Guess the number! (between 1 and 100)")
        count = 60  # game times out after 60 seconds
        while count >= 0 and number != -1:
            count -= 1
            await asyncio.sleep(1)
            #print(count >= 0 and number != -1, count, number)
        if number != -1:
            await client.delete_message(msg)
            msg = await client.send_message(message.channel, ":1234: Times up!")
            number = -1
            await asyncio.sleep(5)
            await client.delete_message(msg)
    elif message.content.startswith("//"):
        error = "Sorry the \"" + message.content + "\" command does not exist! Use //help for more information."
        tempMessage = 5
    #roles = message.author.roles[1]
    #print(roles.name)
    #-----------------------------------------------------------------------------------------------------------------------------------------
    # Deferred error reporting / auto-delete for the command handled above.
    if error != "":
        msg = await client.send_message(message.channel, ":no_entry_sign: " + message.author.mention + ", " + error)
    if tempMessage != 0:
        await asyncio.sleep(tempMessage)
        await client.delete_message(msg)
    #-----------------------------------------------------------------------------------------------------------------------------------------
    try:
        #number game cont
        # Non-command messages are treated as guesses while a game is active.
        if number >= 1:
            if int(message.content) < number:
                await client.send_typing(message.channel)
                await client.delete_message(message)
                msg = await client.send_message(message.channel, "The number is larger than " + message.content)
                await asyncio.sleep(5)
                await client.delete_message(msg)
            elif int(message.content) > number:
                await client.send_typing(message.channel)
                await client.delete_message(message)
                msg = await client.send_message(message.channel, "The number is smaller than " + message.content)
                await asyncio.sleep(5)
                await client.delete_message(msg)
            elif int(message.content) == number:
                await client.send_typing(message.channel)
                await client.delete_message(message)
                await client.send_message(message.channel, "Congrats " + str(number) + " was the number " + message.author.mention)
                number = -1
    except:
        # Non-numeric content (or no game running) simply ends the handler.
        return
from PyDictionary import PyDictionary

# Quick PyDictionary demo: print the Hindi translation of a couple of
# English words.  (Other API calls — meaning/translate-to-es/antonym —
# were tried earlier and left disabled.)
dictionary = PyDictionary()

for english_word in ('good', 'positive'):
    print(dictionary.translate(english_word, 'hi'))
class RecursiveLambdaFunctionGrowth(object):
    """Grow lambda-function composition trees over text graphs and score them.

    Python 2 code (print statements, xrange, dict.iteritems).  Depends on
    external modules not visible in this chunk: AVLTree, BinaryTree,
    ConceptNet5Client, PyDictionary, networkx (nx), NLTK WordNet (wn),
    RecursiveGlossOverlap_Classifier, SentimentAnalyzer, path_between,
    print_node, write_dot, operator, difflib, math.

    NOTE(review): original source had its indentation collapsed; nesting
    below was reconstructed from syntax and a formatted duplicate of this
    class — confirm against the canonical upstream file.
    """

    def __init__(self):
        # Composition/index trees rebuilt per invocation of the grow_* methods.
        self.lambda_comp_tree=AVLTree()
        self.index_tree=BinaryTree()
        self.word_list=[]
        self.word_dict={}
        self.index_dict={}
        self.index_list=[]
        self.lambda_expression=[]
        self.lambda_composition=""
        # Accumulated across calls; reset to 1.0 at the end of grow_lambda_function3().
        self.graph_tensor_neuron_network_intrinsic_merit=1.0
        # Sentinel "infinity"; korner_entropy() minimizes into this.
        self.entropy=10000000000.0
        self.conceptnet=ConceptNet5Client()
        #self.Similarity="ConceptNet"
        self.Similarity="WordNet"
        # True => walk simple cycles in grow_lambda_function3(); False => all-pairs shortest paths.
        self.ClosedPaths=True
        self.dictionary=PyDictionary()

    def get_next_tree_traversal_id(self,x,y):
        """Recursively append midpoints of (x, y) to self.index_list (binary-split order)."""
        if y-x == 1 or x-y == 1:
            return 1
        print "x,y:",x,y
        self.index_list.append((x+y)/2)
        self.get_next_tree_traversal_id(x,(x+y)/2)
        self.get_next_tree_traversal_id((x+y)/2,y)

    def build_lambda_expression(self,key,value):
        """Tree-foreach callback: collect visited values into self.lambda_expression."""
        #print value,
        self.lambda_expression.append(value)

    def build_lambda_comp_tree(self,k,v):
        """Tree-foreach callback: map index k to the k-th word of self.word_list."""
        if k < len(self.word_list):
            self.word_dict[k]=self.word_list[k]

    def return_next(self,k,v):
        """Identity callback: return the (key, value) pair unchanged."""
        return (k,v)

    def grow_lambda_function2(self, wordlist):
        """Build an AVL composition tree from wordlist and evaluate it bottom-up.

        Returns a tuple (lambda-composition string, per-walk graph tensor
        neuron network intrinsic merit).  Also accumulates into
        self.graph_tensor_neuron_network_intrinsic_merit.
        """
        self.word_list=wordlist
        self.word_dict={}
        cnt=0
        while cnt < len(self.word_list):
            self.index_dict[cnt]=cnt
            cnt+=1
        self.index_tree=BinaryTree(self.index_dict)
        self.index_tree.foreach(self.build_lambda_comp_tree,0)
        self.lambda_comp_tree=AVLTree(self.word_dict)
        print "==========================================================================="
        print "Lambda Composition AVL Tree (inorder traversed) is the original text itself:"
        print "==========================================================================="
        self.lambda_expression=[]
        # foreach order flag 0: inorder traversal reproduces the original word order.
        self.lambda_comp_tree.foreach(self.build_lambda_expression, 0)
        print self.lambda_expression
        print "==========================================================================="
        print "Lambda Composition AVL Tree (postorder traversed - Postfix expression):"
        print "Every parenthesis has two operands,operated by function outside:"
        print "==============================================================="
        self.lambda_expression=[]
        # foreach order flag 1: postorder traversal yields a postfix expression.
        self.lambda_comp_tree.foreach(self.build_lambda_expression, 1)
        self.lambda_composition=[]
        cnt=0
        per_random_walk_graph_tensor_neuron_network_intrinsic_merit = 0
        #recursively evaluate the Graph Tensor Neuron Network for random walk composition tree bottom up as Graph Neural Network
        #having Tensor Neuron activations for each subtree.
        while len(self.lambda_expression) > 2 :
            # Pop postfix triple: two operands operated on by a "function" word.
            operand2=self.lambda_expression.pop()
            operand1=self.lambda_expression.pop()
            function=self.lambda_expression.pop()
            subtree_graph_tensor_neuron_network_wght = self.subtree_graph_tensor_neuron_network_weight(operand1, function, operand2)
            self.graph_tensor_neuron_network_intrinsic_merit += subtree_graph_tensor_neuron_network_wght
            per_random_walk_graph_tensor_neuron_network_intrinsic_merit += subtree_graph_tensor_neuron_network_wght
            self.lambda_composition="("+function+"("+operand1+","+operand2+"))"
            self.lambda_expression.append(self.lambda_composition)
            cnt+=1
        if len(self.lambda_expression) > 1:
            return (self.lambda_expression[0] + "("+self.lambda_expression[1]+")", per_random_walk_graph_tensor_neuron_network_intrinsic_merit)
        else:
            return (self.lambda_expression[0], per_random_walk_graph_tensor_neuron_network_intrinsic_merit)

    def grow_lambda_function1(self):
        """Read RecursiveLambdaFunctionGrowth.txt, order words by a harmonic index
        AVL tree, build a composition tree and print each key with its
        predecessor/successor keys."""
        text=open("RecursiveLambdaFunctionGrowth.txt","r")
        word_dict={}
        index_dict={}
        words_evaluated=0
        word_list=text.read().split()
        for cnt in range(1,len(word_list)):
            # Harmonic-style index: position -> len/cnt (Python 2 integer division).
            index_dict[cnt-1] = len(word_list)/cnt
        index_tree=AVLTree(index_dict)
        print "Index AVL Tree:", repr(index_tree)
        #index_tree.foreach(print_node,1)
        try:
            while words_evaluated < len(word_list):
                #word_dict[words_evaluated]=word_list[random.randint(0,len(word_list)-1)]
                #print word_list[index_tree.pop_min()[0]]
                word_dict[words_evaluated]=word_list[index_tree.pop_min()[0]]
                words_evaluated+=1
        except:
            # Bare except deliberately ends the drain when pop_min() is exhausted.
            pass
        self.lambda_comp_tree=AVLTree(word_dict)
        print "Lambda Composition AVL Tree:"
        self.lambda_comp_tree.foreach(print_node)
        iteration=0
        while iteration < len(word_list):
            k=self.lambda_comp_tree.get(iteration)
            print "k:",k
            try:
                prev=self.lambda_comp_tree.prev_key(iteration)
                prevk=self.lambda_comp_tree.get(prev)
                print "prevk:",prevk
            except:
                # No predecessor for the minimum key.
                pass
            try:
                succ=self.lambda_comp_tree.succ_key(iteration)
                succk=self.lambda_comp_tree.get(succ)
                print "succk:",succk
            except:
                # No successor for the maximum key.
                pass
            iteration+=1

    def get_tensor_neuron_potential_for_relation(self,synset_vertex,synset_r):
        """Return the maximum pairwise similarity (< 1.0) between two synset lists,
        using WordNet Wu-Palmer or ConceptNet distance per self.Similarity."""
        smt=0.0
        similarity=0.0
        for s1, s2 in product(synset_vertex, synset_r):
            if self.Similarity=="WordNet":
                smt=wn.wup_similarity(s1,s2)
            if self.Similarity=="ConceptNet":
                s1_lemma_names=s1.lemma_names()
                s2_lemma_names=s2.lemma_names()
                smt=self.conceptnet.conceptnet_distance(s1_lemma_names[0], s2_lemma_names[0])
            #print "similarity=",smt
            # Exact 1.0 matches are excluded (identical synsets carry no information).
            if smt > similarity and smt != 1.0:
                similarity = float(smt)
        return similarity

    def subtree_graph_tensor_neuron_network_weight(self, e1, r, e2):
        """Compute the tensor-neuron activation for a subtree (e1, r, e2).

        Operands that are already parenthesized compositions are unwrapped
        to their head word before the WordNet lookup.
        """
        #relation_tensor_neuron_potential=self.get_tensor_neuron_potential_for_relation(r)
        if e1[0]=="(":
            e1_parsed=e1.split("(")
            #print "operand1:", e1_parsed[1]
            synset_e1 = wn.synsets(e1_parsed[1])
        else:
            synset_e1 = wn.synsets(e1)
            #print "operand1:", e1
        #print "Relation: ",r
        synset_r = wn.synsets(r)
        if e2[0]=="(":
            e2_parsed=e2.split("(")
            #print "operand2:", e2_parsed[1]
            synset_e2 = wn.synsets(e2_parsed[1])
        else:
            #print "operand2:", e2
            synset_e2 = wn.synsets(e2)
        similarity1 = 0.0
        similarity2 = 0.0
        #Children of each subtree are the Tensor Neuron inputs to the subtree root
        #Each subtree is evaluated as a graph neural network with weights for
        #each neural input to the subtree root. WordNet similarity is computed
        #between each child and subtree root and is presently assumed as Tensor Neuron
        #relation potential for the lack of better metric to measure word-word EEG potential.
        #If a dataset for tensor neuron potential
        #is available, it has to to be looked-up and numeric
        #potential has to be returned from here.
        similarity1 = self.get_tensor_neuron_potential_for_relation(synset_e1,synset_r)
        similarity2 = self.get_tensor_neuron_potential_for_relation(synset_e2,synset_r)
        # Zero similarity (no synsets / no overlap) falls back to full weight 1.0.
        if similarity1 == 0.0:
            similarity1 = 1.0
        if similarity2 == 0.0:
            similarity2 = 1.0
        weight1=0.5
        weight2=0.5
        bias=0.1
        #Finally a neuron activation function (simple 1-dimensional tensor) is computed and
        #returned to the subtree root for next level.
        return (weight1*similarity1 + weight2*similarity2 + bias)

    def randomwalk_lambda_function_composition_tree(self,randomwalk):
        """Thin wrapper: build/evaluate the composition tree for one random walk."""
        randomwalk_lambdacomposition=self.grow_lambda_function2(randomwalk)
        return randomwalk_lambdacomposition

    def create_summary(self,text,corenumber=3,pathsimilarity=0.8,graphtraversedsummary=False,shortestpath=True):
        """Summarize text either via a k-core of its Recursive Gloss Overlap graph
        (graphtraversedsummary=True) or via its prominent RGO classes.

        Returns (summary, length-of-summary).
        """
        if graphtraversedsummary==True:
            definitiongraph=RecursiveGlossOverlap_Classifier.RecursiveGlossOverlapGraph(text)
            #This has to be replaced by a Hypergraph Transversal but NetworkX does not have Hypergraphs yet.
            #Hence approximating the transversal with a k-core which is the Graph counterpart of
            #Hypergraph transversal. Other measures create a summary too : Vertex Cover is NP-hard while Edge Cover is Polynomial Time.
            richclubcoeff=nx.rich_club_coefficient(definitiongraph.to_undirected())
            print "Rich Club Coefficient of the Recursive Gloss Overlap Definition Graph:",richclubcoeff
            kcore=nx.k_core(definitiongraph,corenumber)
            print "Text Summarized by k-core(subgraph having vertices of degree atleast k) on the Recursive Gloss Overlap graph:"
            print "=========================="
            print "Dense subgraph edges:"
            print "=========================="
            print kcore.edges()
            print "=========================="
            if shortestpath == False:
                for e in kcore.edges():
                    for s1 in wn.synsets(e[0]):
                        for s2 in wn.synsets(e[1]):
                            if s1.path_similarity(s2) > pathsimilarity:
                                lowestcommonhypernyms=s1.lowest_common_hypernyms(s2)
                                for l in lowestcommonhypernyms:
                                    for ln in l.lemma_names():
                                        print e[0]," and ",e[1]," are ",ln,".",
            else:
                #Following is the slightly modified version of shortest_path_distance() function
                #in NLTK wordnet - traverses the synset path between 2 synsets instead of distance
                summary={}
                intermediates=[]
                for e in kcore.edges():
                    for s1 in wn.synsets(e[0]):
                        for s2 in wn.synsets(e[1]):
                            s1dict = s1._shortest_hypernym_paths(False)
                            s2dict = s2._shortest_hypernym_paths(False)
                            s2dictkeys=s2dict.keys()
                            for s,d in s1dict.iteritems():
                                if s in s2dictkeys:
                                    slemmanames=s.lemma_names()
                                    if slemmanames[0] not in intermediates:
                                        intermediates.append(slemmanames[0])
                            # Only chains with > 3 intermediates yield "is a" sentences.
                            if len(intermediates) > 3:
                                sentence1=e[0] + " is a " + intermediates[0]
                                summary[sentence1]=self.relevance_to_text(sentence1,text)
                                for i in xrange(len(intermediates)-2):
                                    sentence2= intermediates[i] + " is a " + intermediates[i+1] + "."
                                    if sentence2 not in summary:
                                        summary[sentence2]=self.relevance_to_text(sentence2,text)
                                sentence3=intermediates[len(intermediates)-1] + " is a " + e[1]
                                summary[sentence3]=self.relevance_to_text(sentence3,text)
                                intermediates=[]
            sorted_summary=sorted(summary,key=operator.itemgetter(1), reverse=True)
            print "==================================================================="
            print "Sorted summary created from k-core dense subgraph of text RGO"
            print "==================================================================="
            for s in sorted_summary:
                print s,
            return (sorted_summary, len(sorted_summary))
        else:
            definitiongraph_merit=RecursiveGlossOverlap_Classifier.RecursiveGlossOverlapGraph(text)
            definitiongraph=definitiongraph_merit[0]
            richclubcoeff=nx.rich_club_coefficient(definitiongraph.to_undirected(),normalized=False)
            print "Rich Club Coefficient of the Recursive Gloss Overlap Definition Graph:",richclubcoeff
            textsentences=text.split(".")
            lensummary=0
            summary=[]
            definitiongraphclasses=RecursiveGlossOverlap_Classifier.RecursiveGlossOverlap_Classify(text)
            print "Text Summarized based on the Recursive Gloss Overlap graph classes the text belongs to:"
            prominentclasses=int(len(definitiongraphclasses[0])/2)
            print "Total number of classes:",len(definitiongraphclasses[0])
            print "Number of prominent classes:",prominentclasses
            for c in definitiongraphclasses[0][:prominentclasses]:
                # Cap the summary at half the number of sentences in the text.
                if len(summary) > len(textsentences) * 0.5:
                    return (summary,lensummary)
                for s in textsentences:
                    classsynsets=wn.synsets(c[0])
                    for classsynset in classsynsets:
                        # 0.41 relevance threshold — presumably tuned empirically; TODO confirm.
                        if self.relevance_to_text(classsynset.definition(), s) > 0.41:
                            if s not in summary:
                                summary.append(s)
                                lensummary += len(s)
                                print s,
            return (summary,lensummary)

    def relevance_to_text(self, sentence, text):
        """Return the best Ratcliff/Obershelp similarity of sentence to any sentence of text."""
        #Ratcliff/Obershelp gestalt string pattern matching
        textset=set(text.split("."))
        relevancescore=0.0
        for t in textset:
            rs=difflib.SequenceMatcher(None,sentence,t).ratio()
            relevancescore=max(rs,relevancescore)
        return relevancescore

    def instrument_relations(self, rw_words_list):
        """Expand a walk's word list by inserting path_between() relations between
        consecutive words; fall back to the original list when nothing is found."""
        word_list_len=len(rw_words_list)
        instrumented_rw_words_list=[]
        if word_list_len==2:
            path=path_between(rw_words_list[0], rw_words_list[1])
            for p in path:
                instrumented_rw_words_list.append(p)
        else:
            for n in range(0,word_list_len-2):
                path=path_between(rw_words_list[n], rw_words_list[n+1])
                for p in path:
                    instrumented_rw_words_list.append(p)
        if len(instrumented_rw_words_list) > 0:
            return instrumented_rw_words_list
        else:
            return rw_words_list

    def grow_lambda_function3(self,text,level=3):
        """Compute a dictionary of intrinsic-merit metrics for text.

        Builds the RGO definition graph, evaluates composition trees over
        either all-pairs shortest paths or simple cycles (per
        self.ClosedPaths), and collects entropy/density/fitness metrics.
        """
        stpairs=[]
        maximum_per_random_walk_graph_tensor_neuron_network_intrinsic_merit=("",0.0)
        definitiongraph_merit=RecursiveGlossOverlap_Classifier.RecursiveGlossOverlapGraph(text,level)
        definitiongraph=definitiongraph_merit[0]
        sentiment=SentimentAnalyzer.SentimentAnalysis_RGO_Belief_Propagation_MarkovRandomFields(definitiongraph)
        apsp=nx.all_pairs_shortest_path(definitiongraph)
        for a in definitiongraph.nodes():
            for b in definitiongraph.nodes():
                stpairs.append((a,b))
        rw_ct=""
        if self.ClosedPaths==False:
            # Open-walk mode: evaluate every source-target shortest path.
            for k,v in stpairs:
                try:
                    print "==================================================================="
                    print "Random Walk between :",k," and ",v,":",apsp[k][v]
                    instrumented_apspkv=self.instrument_relations(apsp[k][v])
                    rw_ct=self.randomwalk_lambda_function_composition_tree(instrumented_apspkv)
                    print "Random Walk Composition Tree for walk between :",k," and ",v,":",rw_ct
                    print "maximum_per_random_walk_graph_tensor_neuron_network_intrinsic_merit=",maximum_per_random_walk_graph_tensor_neuron_network_intrinsic_merit
                    print "==================================================================="
                    if rw_ct[1] > maximum_per_random_walk_graph_tensor_neuron_network_intrinsic_merit[1]:
                        maximum_per_random_walk_graph_tensor_neuron_network_intrinsic_merit=rw_ct
                except KeyError:
                    # No path between this pair in the shortest-path table.
                    pass
                rw_ct=""
        if self.ClosedPaths==True:
            # Closed-walk mode: evaluate up to 500 simple cycles.
            allsimplecycles=nx.simple_cycles(definitiongraph)
            #allsimplecycles=nx.cycle_basis(definitiongraph)
            number_of_cycles=0
            for cycle in allsimplecycles:
                number_of_cycles += 1
                if number_of_cycles > 500:
                    break
                try:
                    print "==================================================================="
                    print "Cycle :",cycle
                    instrumented_cycle=self.instrument_relations(cycle)
                    print "instrumented_cycle:",instrumented_cycle
                    rw_ct=self.randomwalk_lambda_function_composition_tree(instrumented_cycle)
                    print "Cycle Composition Tree for this cycle :",rw_ct
                    print "maximum_per_random_walk_graph_tensor_neuron_network_intrinsic_merit=",maximum_per_random_walk_graph_tensor_neuron_network_intrinsic_merit
                    print "==================================================================="
                    if rw_ct[1] > maximum_per_random_walk_graph_tensor_neuron_network_intrinsic_merit[1]:
                        maximum_per_random_walk_graph_tensor_neuron_network_intrinsic_merit=rw_ct
                except KeyError:
                    pass
                rw_ct=""
        intrinsic_merit_dict={}
        print "grow_lambda_function3(): Graph Tensor Neuron Network Intrinsic Merit for this text:",self.graph_tensor_neuron_network_intrinsic_merit
        print "grow_lambda_function3(): Machine Translation Example - English to Kannada:"
        self.machine_translation(definitiongraph, "kn")
        self.korner_entropy(definitiongraph)
        print "grow_lambda_function3(): Korner Entropy Intrinsic Merit for this text:",self.entropy
        density = self.density(definitiongraph)
        print "grow_lambda_function3(): Graph Density (Regularity Lemma):",density
        bose_einstein_intrinsic_fitness=self.bose_einstein_intrinsic_fitness(definitiongraph)
        print "grow_lambda_function3(): Bose-Einstein Intrinsic Fitness:",bose_einstein_intrinsic_fitness
        print "grow_lambda_function3(): Maximum Per Random Walk Graph Tensor Neuron Network Intrinsic Merit :",maximum_per_random_walk_graph_tensor_neuron_network_intrinsic_merit
        print "grow_lambda_function3(): Recursive Gloss Overlap Classifier classes for text:",RecursiveGlossOverlap_Classifier.RecursiveGlossOverlap_Classify(text)
        intrinsic_merit_dict["graph_tensor_neuron_network_intrinsic_merit"]=self.graph_tensor_neuron_network_intrinsic_merit
        intrinsic_merit_dict["maximum_per_random_walk_graph_tensor_neuron_network_intrinsic_merit"]=maximum_per_random_walk_graph_tensor_neuron_network_intrinsic_merit
        intrinsic_merit_dict["korner_entropy"]=self.entropy
        intrinsic_merit_dict["density"]=density
        intrinsic_merit_dict["bose_einstein_intrinsic_fitness"]=bose_einstein_intrinsic_fitness
        intrinsic_merit_dict["recursive_gloss_overlap_intrinsic_merit"]=definitiongraph_merit[1]
        intrinsic_merit_dict["empath_sentiment"]=sentiment
        write_dot(definitiongraph,"RecursiveLambdaFunctionGrowth.dot")
        # Reset the accumulator so the next invocation starts fresh.
        self.graph_tensor_neuron_network_intrinsic_merit=1.0
        print "intrinsic_merit_dict:",intrinsic_merit_dict
        return intrinsic_merit_dict

    def machine_translation(self, definitiongraph, languagecode):
        """Translate every edge of the graph into languagecode via PyDictionary
        and print the resulting bidirectional translation graph."""
        nodes=definitiongraph.nodes()
        edges=definitiongraph.edges()
        translationgraph=nx.DiGraph()
        for k, v in edges:
            ktrans=self.dictionary.translate(k,languagecode)
            vtrans=self.dictionary.translate(v,languagecode)
            print "k=",k,",v=",v,",ktrans=",ktrans,",vtrans=",vtrans
            # Edges added in both directions.
            translationgraph.add_edge(ktrans, vtrans)
            translationgraph.add_edge(vtrans, ktrans)
        print "TextGraph Translated to ",languagecode,":",translationgraph

    #KornerEntropy(G) = minimum [- sum_v_in_V(G) {1/|V(G)| * log(Pr[v in Y])}] for each independent set Y
    def korner_entropy(self, definitiongraph):
        """Approximate Korner graph entropy over maximal independent sets;
        updates and returns self.entropy (minimum seen so far)."""
        nodes=definitiongraph.nodes()
        stable_sets=[]
        for v in nodes:
            stable_sets.append(nx.maximal_independent_set(definitiongraph.to_undirected(),[v]))
        print "korner_entropy(): Stable Independent Sets:",stable_sets
        entropy=0.0
        prob_v_in_stableset=0.0
        for v in nodes:
            for s in stable_sets:
                # Near-certain probability when v is in the stable set, near-zero otherwise.
                if v in s:
                    prob_v_in_stableset=math.log(0.999999)
                else:
                    prob_v_in_stableset=math.log(0.000001)
                entropy += (-1.0) * float(1.0/len(nodes)) * prob_v_in_stableset
            if entropy < self.entropy:
                self.entropy = entropy
            entropy=0.0
        return self.entropy

    #Graph Density - Regularity Lemma
    def density(self, definitiongraph):
        """Return the NetworkX density of the definition graph."""
        dty=nx.classes.function.density(definitiongraph)
        return dty

    #Bose-Einstein Bianconi intrinsic fitness
    def bose_einstein_intrinsic_fitness(self, definitiongraph):
        """Return 2^(-becf * entropy) as a Bose-Einstein-style fitness score."""
        #Bose-Einstein fitness presently assumes energy of a document vertex in a link graph to be
        #the entropy or extent of chaos in the definition graph of document text
        #This has to be replaced by a more suitable fitness measure
        #Bose-Einstein Condensation function value is hardcoded
        entropy = self.korner_entropy(definitiongraph)
        becf = 0.3
        bei_fitness = math.pow(2, -1 * becf * entropy)
        return bei_fitness
# iterate though words of the text for word in text: # find zipf_frequency of the word to determine familiarity of the word zipf = zipf_frequency(word, from_language) if zipf <= 4.5: i += 1 output_file.write('\\b ' + str(i) + '.' + word + ' ' + str(zipf) + ' \\b0') meaning = dictionary.meaning(word) if type(meaning) is dict: for k, v in meaning.items(): output_file.write(' \\b ' + k + ": " + ' \\b0 ' + ', '.join(v)) # f.write(str(meaning)) else: print(meaning) output_file.write(((' ترجمة قوقل: ' + str(dictionary.translate(word, to_language)) + ' ').encode('rtfunicode')).decode()) translation = translator.translate(word) if 'MEMORY WARNING: YOU USED ALL AVAILABLE FREE TRANSLATIONS FOR TODAY.' in translation: output_file.write('$' * 30) break else: output_file.write(translation.encode('rtfunicode').decode() + ' ') output_file.write('🎩🧥�🎩🧥�🎩🧥�\n'.encode('rtfunicode').decode()) output_file.write('}') output_file.close()
# translation = dictionary.translate("happy",'de') # print(translation) ###geht # dictionary.translate("land",'en') # a = dictionary.translate("länglich",'en') # print(a) # translations = dictionary.synonym(a) # print(translations) # b = [dictionary.translate(word,'de') for word in translations] # print(b) H_syny = {} for feat in H: feat_eng = dictionary.translate(feat,'en') print(feat_eng) feat_eng_syns = dictionary.synonym(feat_eng) print(feat_eng_syns) feat_de = [dictionary.translate(word,'de') for word in feat_eng_syns] #print(feat_de) H_syny[feat]= feat_de print(H_syny) Hy_syny = {} for feat in HY: feat_eng = dictionary.translate(feat,'en') print(feat_eng) feat_eng_syns = dictionary.synonym(feat_eng) print(feat_eng_syns) feat_de = [dictionary.translate(word,'de') for word in feat_eng_syns]
from PyDictionary import PyDictionary
import pyttsx

# Interactive dictionary REPL (Python 2: raw_input/pyttsx): for each word
# entered, print its meaning, synonyms, antonyms, Spanish translation and
# Google meaning, then speak the Google meaning aloud.  Type 'quit' to exit.
dictionary = PyDictionary()
engine = pyttsx.init()
engine.setProperty('rate', 150)  # speech rate in words per minute

while True:
    variable = raw_input('Lookup a word, type something in: ')
    # Fix: the original checked 'quit' only at the top of the loop, so typing
    # 'quit' still performed a full (slow, network-bound) lookup of the word
    # "quit" and spoke it before exiting.  Bail out before any lookup.
    if variable == 'quit':
        break
    meaning = (dictionary.meaning(variable))
    synonym = (dictionary.synonym(variable))
    antonym = (dictionary.antonym(variable))
    translate = (dictionary.translate(variable, 'es'))
    google = (dictionary.googlemeaning(variable))
    print("meaning :", meaning)
    print('\n')
    print("synonym :", synonym)
    print('\n')
    print("antonym :", antonym)
    print('\n')
    print("translated to spanish :", translate)
    print('\n')
    print("google meaning: ", google)
    engine.say('google meaning is ')
    engine.say(google)
    engine.runAndWait()
class RecursiveLambdaFunctionGrowth(object): def __init__(self): self.lambda_comp_tree = AVLTree() self.index_tree = BinaryTree() self.word_list = [] self.word_dict = {} self.index_dict = {} self.index_list = [] self.lambda_expression = [] self.lambda_composition = "" self.graph_tensor_neuron_network_intrinsic_merit = 1.0 self.entropy = 10000000000.0 self.conceptnet = ConceptNet5Client() #self.Similarity="ConceptNet" self.Similarity = "WordNet" self.ClosedPaths = True self.dictionary = PyDictionary() def get_next_tree_traversal_id(self, x, y): if y - x == 1 or x - y == 1: return 1 print "x,y:", x, y self.index_list.append((x + y) / 2) self.get_next_tree_traversal_id(x, (x + y) / 2) self.get_next_tree_traversal_id((x + y) / 2, y) def build_lambda_expression(self, key, value): #print value, self.lambda_expression.append(value) def build_lambda_comp_tree(self, k, v): if k < len(self.word_list): self.word_dict[k] = self.word_list[k] def return_next(self, k, v): return (k, v) def grow_lambda_function2(self, wordlist): self.word_list = wordlist self.word_dict = {} cnt = 0 while cnt < len(self.word_list): self.index_dict[cnt] = cnt cnt += 1 self.index_tree = BinaryTree(self.index_dict) self.index_tree.foreach(self.build_lambda_comp_tree, 0) self.lambda_comp_tree = AVLTree(self.word_dict) print "===========================================================================" print "Lambda Composition AVL Tree (inorder traversed) is the original text itself:" print "===========================================================================" self.lambda_expression = [] self.lambda_comp_tree.foreach(self.build_lambda_expression, 0) print self.lambda_expression print "===========================================================================" print "Lambda Composition AVL Tree (postorder traversed - Postfix expression):" print "Every parenthesis has two operands,operated by function outside:" print "===============================================================" 
# --- Continuation of a method whose "def" header is above this chunk: it
# --- flattens self.lambda_comp_tree into a lambda expression and folds it
# --- bottom-up into a composed expression plus a tensor-neuron merit score.
self.lambda_expression = []
self.lambda_comp_tree.foreach(self.build_lambda_expression, 1)
self.lambda_composition = []
cnt = 0
per_random_walk_graph_tensor_neuron_network_intrinsic_merit = 0
#recursively evaluate the Graph Tensor Neuron Network for random walk composition tree bottom up as Graph Neural Network
#having Tensor Neuron activations for each subtree.
# Pop (function, operand1, operand2) triples off the flat expression list and
# fold them into "(f(a,b))" strings; each folded subtree adds its tensor-neuron
# weight to both the per-walk merit and the class-level running merit.
while len(self.lambda_expression) > 2:
    operand2 = self.lambda_expression.pop()
    operand1 = self.lambda_expression.pop()
    function = self.lambda_expression.pop()
    subtree_graph_tensor_neuron_network_wght = self.subtree_graph_tensor_neuron_network_weight(
        operand1, function, operand2)
    self.graph_tensor_neuron_network_intrinsic_merit += subtree_graph_tensor_neuron_network_wght
    per_random_walk_graph_tensor_neuron_network_intrinsic_merit += subtree_graph_tensor_neuron_network_wght
    self.lambda_composition = "(" + function + "(" + operand1 + "," + operand2 + "))"
    self.lambda_expression.append(self.lambda_composition)
    cnt += 1
# Returns (composed lambda expression string, per-walk intrinsic merit); when
# two items remain they are concatenated as outer-function application.
if len(self.lambda_expression) > 1:
    return (
        self.lambda_expression[0] + "(" + self.lambda_expression[1] + ")",
        per_random_walk_graph_tensor_neuron_network_intrinsic_merit)
else:
    return (
        self.lambda_expression[0],
        per_random_walk_graph_tensor_neuron_network_intrinsic_merit)

def grow_lambda_function1(self):
    """Build self.lambda_comp_tree, an AVL tree over the words of
    RecursiveLambdaFunctionGrowth.txt, ordered by popping minima from an
    auxiliary index AVL tree, then print each node with its in-order
    predecessor and successor keys."""
    # NOTE(review): the file handle is never closed - confirm and add a
    # close()/with-block in a behavioral change.
    text = open("RecursiveLambdaFunctionGrowth.txt", "r")
    word_dict = {}
    index_dict = {}
    words_evaluated = 0
    word_list = text.read().split()
    # Index key cnt-1 maps to len(word_list)/cnt (Python 2 integer division),
    # so pop_min() yields word positions in an order derived from these ratios.
    for cnt in range(1, len(word_list)):
        index_dict[cnt - 1] = len(word_list) / cnt
    index_tree = AVLTree(index_dict)
    print "Index AVL Tree:", repr(index_tree)
    #index_tree.foreach(print_node,1)
    try:
        while words_evaluated < len(word_list):
            #word_dict[words_evaluated]=word_list[random.randint(0,len(word_list)-1)]
            #print word_list[index_tree.pop_min()[0]]
            word_dict[words_evaluated] = word_list[index_tree.pop_min()[0]]
            words_evaluated += 1
    except:
        # Best-effort: stops silently when the index tree is exhausted or an
        # index is out of range. NOTE(review): bare except hides real errors.
        pass
    self.lambda_comp_tree = AVLTree(word_dict)
    print "Lambda Composition AVL Tree:"
    self.lambda_comp_tree.foreach(print_node)
    iteration = 0
    # Walk every key and print its neighbours; prev_key/succ_key raise at the
    # tree boundaries, hence the per-step try/except guards.
    while iteration < len(word_list):
        k = self.lambda_comp_tree.get(iteration)
        print "k:", k
        try:
            prev = self.lambda_comp_tree.prev_key(iteration)
            prevk = self.lambda_comp_tree.get(prev)
            print "prevk:", prevk
        except:
            pass
        try:
            succ = self.lambda_comp_tree.succ_key(iteration)
            succk = self.lambda_comp_tree.get(succ)
            print "succk:", succk
        except:
            pass
        iteration += 1

def get_tensor_neuron_potential_for_relation(self, synset_vertex, synset_r):
    """Return the maximum pairwise similarity between the two synset lists,
    using WordNet Wu-Palmer similarity or ConceptNet distance depending on
    self.Similarity.  Exact matches (similarity == 1.0) are skipped."""
    smt = 0.0
    similarity = 0.0
    for s1, s2 in product(synset_vertex, synset_r):
        if self.Similarity == "WordNet":
            smt = wn.wup_similarity(s1, s2)
        if self.Similarity == "ConceptNet":
            s1_lemma_names = s1.lemma_names()
            s2_lemma_names = s2.lemma_names()
            # ConceptNet distance is computed on the first lemma name of each
            # synset rather than on the synsets themselves.
            smt = self.conceptnet.conceptnet_distance(
                s1_lemma_names[0], s2_lemma_names[0])
        #print "similarity=",smt
        # 1.0 (identical words) is deliberately excluded from the maximum.
        if smt > similarity and smt != 1.0:
            similarity = float(smt)
    return similarity

def subtree_graph_tensor_neuron_network_weight(self, e1, r, e2):
    """Compute the tensor-neuron activation for a subtree rooted at relation
    r with children e1 and e2.  Each child is either a bare word or an
    already-composed "(f(a,b..." expression, in which case the token after
    the first "(" is used for the synset lookup."""
    #relation_tensor_neuron_potential=self.get_tensor_neuron_potential_for_relation(r)
    if e1[0] == "(":
        e1_parsed = e1.split("(")
        #print "operand1:", e1_parsed[1]
        synset_e1 = wn.synsets(e1_parsed[1])
    else:
        synset_e1 = wn.synsets(e1)
        #print "operand1:", e1
    #print "Relation: ",r
    synset_r = wn.synsets(r)
    if e2[0] == "(":
        e2_parsed = e2.split("(")
        #print "operand2:", e2_parsed[1]
        synset_e2 = wn.synsets(e2_parsed[1])
    else:
        #print "operand2:", e2
        synset_e2 = wn.synsets(e2)
    similarity1 = 0.0
    similarity2 = 0.0
    #Children of each subtree are the Tensor Neuron inputs to the subtree root
    #Each subtree is evaluated as a graph neural network with weights for
    #each neural input to the subtree root. WordNet similarity is computed
    #between each child and subtree root and is presently assumed as Tensor Neuron
    #relation potential for the lack of better metric to measure word-word EEG potential.
    #If a dataset for tensor neuron potential
    #is available, it has to be looked-up and numeric
    #potential has to be returned from here.
    similarity1 = self.get_tensor_neuron_potential_for_relation(
        synset_e1, synset_r)
    similarity2 = self.get_tensor_neuron_potential_for_relation(
        synset_e2, synset_r)
    # Zero similarity (no synsets / no overlap) defaults to 1.0 so the
    # activation never collapses to the bias alone.
    if similarity1 == 0.0:
        similarity1 = 1.0
    if similarity2 == 0.0:
        similarity2 = 1.0
    # Fixed neuron parameters: equal input weights and a small bias.
    weight1 = 0.5
    weight2 = 0.5
    bias = 0.1
    #Finally a neuron activation function (simple 1-dimensional tensor) is computed and
    #returned to the subtree root for next level.
    return (weight1 * similarity1 + weight2 * similarity2 + bias)

def randomwalk_lambda_function_composition_tree(self, randomwalk):
    """Thin wrapper: delegate composition-tree construction for one random
    walk to grow_lambda_function2()."""
    randomwalk_lambdacomposition = self.grow_lambda_function2(randomwalk)
    return randomwalk_lambdacomposition

def create_summary(self,
                   text,
                   corenumber=3,
                   pathsimilarity=0.8,
                   graphtraversedsummary=False,
                   shortestpath=True):
    """Summarize text either by traversing a k-core of its Recursive Gloss
    Overlap definition graph (graphtraversedsummary=True) or by ranking the
    text's sentences against its most prominent RGO classes (default).

    Returns a (summary, length) tuple; the summary is a sorted key list in
    the graph-traversed branch and a sentence list otherwise."""
    if graphtraversedsummary == True:
        definitiongraph = RecursiveGlossOverlap_Classifier.RecursiveGlossOverlapGraph(
            text)
        #This has to be replaced by a Hypergraph Transversal but NetworkX does not have Hypergraphs yet.
        #Hence approximating the transversal with a k-core which is the Graph counterpart of
        #Hypergraph transversal. Other measures create a summary too : Vertex Cover is NP-hard while Edge Cover is Polynomial Time.
        richclubcoeff = nx.rich_club_coefficient(
            definitiongraph.to_undirected())
        print "Rich Club Coefficient of the Recursive Gloss Overlap Definition Graph:", richclubcoeff
        kcore = nx.k_core(definitiongraph, corenumber)
        print "Text Summarized by k-core(subgraph having vertices of degree atleast k) on the Recursive Gloss Overlap graph:"
        print "=========================="
        print "Dense subgraph edges:"
        print "=========================="
        print kcore.edges()
        print "=========================="
        if shortestpath == False:
            # Print lowest common hypernyms for sufficiently similar synset
            # pairs along each dense-subgraph edge.
            for e in kcore.edges():
                for s1 in wn.synsets(e[0]):
                    for s2 in wn.synsets(e[1]):
                        if s1.path_similarity(s2) > pathsimilarity:
                            lowestcommonhypernyms = s1.lowest_common_hypernyms(
                                s2)
                            for l in lowestcommonhypernyms:
                                for ln in l.lemma_names():
                                    print e[0], " and ", e[
                                        1], " are ", ln, ".",
        else:
            #Following is the slightly modified version of shortest_path_distance() function
            #in NLTK wordnet - traverses the synset path between 2 synsets instead of distance
            summary = {}
            intermediates = []
            for e in kcore.edges():
                for s1 in wn.synsets(e[0]):
                    for s2 in wn.synsets(e[1]):
                        # Collect hypernyms shared by both synsets' shortest
                        # hypernym paths as intermediate concepts.
                        s1dict = s1._shortest_hypernym_paths(False)
                        s2dict = s2._shortest_hypernym_paths(False)
                        s2dictkeys = s2dict.keys()
                        for s, d in s1dict.iteritems():
                            if s in s2dictkeys:
                                slemmanames = s.lemma_names()
                                if slemmanames[0] not in intermediates:
                                    intermediates.append(slemmanames[0])
                        # Once enough intermediates accumulate, emit the
                        # "X is a Y" chain scored by relevance to the text.
                        if len(intermediates) > 3:
                            sentence1 = e[0] + " is a " + intermediates[0]
                            summary[sentence1] = self.relevance_to_text(
                                sentence1, text)
                            for i in xrange(len(intermediates) - 2):
                                sentence2 = intermediates[
                                    i] + " is a " + intermediates[i + 1] + "."
                                if sentence2 not in summary:
                                    summary[sentence2] = self.relevance_to_text(
                                        sentence2, text)
                            sentence3 = intermediates[len(intermediates) -
                                                      1] + " is a " + e[1]
                            summary[sentence3] = self.relevance_to_text(
                                sentence3, text)
                            intermediates = []
            # NOTE(review): this sorts the dict's KEYS (sentence strings) by
            # their second character, not by relevance score - confirm whether
            # summary.items() with itemgetter(1) was intended.
            sorted_summary = sorted(summary,
                                    key=operator.itemgetter(1),
                                    reverse=True)
            print "==================================================================="
            print "Sorted summary created from k-core dense subgraph of text RGO"
            print "==================================================================="
            for s in sorted_summary:
                print s,
            return (sorted_summary, len(sorted_summary))
    else:
        definitiongraph_merit = RecursiveGlossOverlap_Classifier.RecursiveGlossOverlapGraph(
            text)
        definitiongraph = definitiongraph_merit[0]
        richclubcoeff = nx.rich_club_coefficient(
            definitiongraph.to_undirected(), normalized=False)
        print "Rich Club Coefficient of the Recursive Gloss Overlap Definition Graph:", richclubcoeff
        textsentences = text.split(".")
        lensummary = 0
        summary = []
        definitiongraphclasses = RecursiveGlossOverlap_Classifier.RecursiveGlossOverlap_Classify(
            text)
        print "Text Summarized based on the Recursive Gloss Overlap graph classes the text belongs to:"
        # Only the top half of the classifier's classes are considered.
        prominentclasses = int(len(definitiongraphclasses[0]) / 2)
        print "Total number of classes:", len(definitiongraphclasses[0])
        print "Number of prominent classes:", prominentclasses
        for c in definitiongraphclasses[0][:prominentclasses]:
            # Cap the summary at half the number of sentences in the text.
            if len(summary) > len(textsentences) * 0.5:
                return (summary, lensummary)
            for s in textsentences:
                classsynsets = wn.synsets(c[0])
                for classsynset in classsynsets:
                    # 0.41 relevance threshold between class-synset
                    # definition and sentence.
                    if self.relevance_to_text(classsynset.definition(),
                                              s) > 0.41:
                        if s not in summary:
                            summary.append(s)
                            lensummary += len(s)
                            print s,
        return (summary, lensummary)

def relevance_to_text(self, sentence, text):
    """Return the best Ratcliff/Obershelp match ratio between sentence and
    any sentence of text."""
    #Ratcliff/Obershelp gestalt string pattern matching
    textset = set(text.split("."))
    relevancescore = 0.0
    for t in textset:
        rs = difflib.SequenceMatcher(None, sentence, t).ratio()
        relevancescore = max(rs, relevancescore)
    return relevancescore

def instrument_relations(self, rw_words_list):
    """Expand a random-walk word list by splicing in the path_between()
    words for consecutive pairs; return the original list unchanged when
    nothing was found."""
    word_list_len = len(rw_words_list)
    instrumented_rw_words_list = []
    if word_list_len == 2:
        path = path_between(rw_words_list[0], rw_words_list[1])
        for p in path:
            instrumented_rw_words_list.append(p)
    else:
        # NOTE(review): range stops at word_list_len-2, so the final pair
        # (n-2, n-1) is never instrumented - confirm whether intended.
        for n in range(0, word_list_len - 2):
            path = path_between(rw_words_list[n], rw_words_list[n + 1])
            for p in path:
                instrumented_rw_words_list.append(p)
    if len(instrumented_rw_words_list) > 0:
        return instrumented_rw_words_list
    else:
        return rw_words_list

def grow_lambda_function3(self, text, level=3):
    """Build the Recursive Gloss Overlap definition graph of text, evaluate
    lambda composition trees over its random walks (self.ClosedPaths False)
    or simple cycles (True, capped at 500), and return a dict of intrinsic
    merit metrics (tensor-neuron merits, Korner entropy, density,
    Bose-Einstein fitness, RGO merit, sentiment)."""
    stpairs = []
    # (expression, merit) pair tracking the best random walk seen so far.
    maximum_per_random_walk_graph_tensor_neuron_network_intrinsic_merit = (
        "", 0.0)
    definitiongraph_merit = RecursiveGlossOverlap_Classifier.RecursiveGlossOverlapGraph(
        text, level)
    definitiongraph = definitiongraph_merit[0]
    sentiment = SentimentAnalyzer.SentimentAnalysis_RGO_Belief_Propagation_MarkovRandomFields(
        definitiongraph)
    apsp = nx.all_pairs_shortest_path(definitiongraph)
    # Enumerate all ordered vertex pairs (including self-pairs).
    for a in definitiongraph.nodes():
        for b in definitiongraph.nodes():
            stpairs.append((a, b))
    rw_ct = ""
    if self.ClosedPaths == False:
        for k, v in stpairs:
            try:
                print "==================================================================="
                print "Random Walk between :", k, " and ", v, ":", apsp[k][
                    v]
                instrumented_apspkv = self.instrument_relations(apsp[k][v])
                rw_ct = self.randomwalk_lambda_function_composition_tree(
                    instrumented_apspkv)
                print "Random Walk Composition Tree for walk between :", k, " and ", v, ":", rw_ct
                print "maximum_per_random_walk_graph_tensor_neuron_network_intrinsic_merit=", maximum_per_random_walk_graph_tensor_neuron_network_intrinsic_merit
                print "==================================================================="
                # rw_ct is a (expression, merit) tuple; keep the best merit.
                if rw_ct[
                        1] > maximum_per_random_walk_graph_tensor_neuron_network_intrinsic_merit[
                            1]:
                    maximum_per_random_walk_graph_tensor_neuron_network_intrinsic_merit = rw_ct
            except KeyError:
                # No shortest path between this pair - skip.
                pass
            rw_ct = ""
    if self.ClosedPaths == True:
        allsimplecycles = nx.simple_cycles(definitiongraph)
        #allsimplecycles=nx.cycle_basis(definitiongraph)
        number_of_cycles = 0
        for cycle in allsimplecycles:
            number_of_cycles += 1
            # Hard cap: simple-cycle enumeration can explode combinatorially.
            if number_of_cycles > 500:
                break
            try:
                print "==================================================================="
                print "Cycle :", cycle
                instrumented_cycle = self.instrument_relations(cycle)
                print "instrumented_cycle:", instrumented_cycle
                rw_ct = self.randomwalk_lambda_function_composition_tree(
                    instrumented_cycle)
                print "Cycle Composition Tree for this cycle :", rw_ct
                print "maximum_per_random_walk_graph_tensor_neuron_network_intrinsic_merit=", maximum_per_random_walk_graph_tensor_neuron_network_intrinsic_merit
                print "==================================================================="
                if rw_ct[
                        1] > maximum_per_random_walk_graph_tensor_neuron_network_intrinsic_merit[
                            1]:
                    maximum_per_random_walk_graph_tensor_neuron_network_intrinsic_merit = rw_ct
            except KeyError:
                pass
            rw_ct = ""
    intrinsic_merit_dict = {}
    print "grow_lambda_function3(): Graph Tensor Neuron Network Intrinsic Merit for this text:", self.graph_tensor_neuron_network_intrinsic_merit
    print "grow_lambda_function3(): Machine Translation Example - English to Kannada:"
    self.machine_translation(definitiongraph, "kn")
    self.korner_entropy(definitiongraph)
    print "grow_lambda_function3(): Korner Entropy Intrinsic Merit for this text:", self.entropy
    density = self.density(definitiongraph)
    print "grow_lambda_function3(): Graph Density (Regularity Lemma):", density
    bose_einstein_intrinsic_fitness = self.bose_einstein_intrinsic_fitness(
        definitiongraph)
    print "grow_lambda_function3(): Bose-Einstein Intrinsic Fitness:", bose_einstein_intrinsic_fitness
    print "grow_lambda_function3(): Maximum Per Random Walk Graph Tensor Neuron Network Intrinsic Merit :", maximum_per_random_walk_graph_tensor_neuron_network_intrinsic_merit
    print "grow_lambda_function3(): Recursive Gloss Overlap Classifier classes for text:", RecursiveGlossOverlap_Classifier.RecursiveGlossOverlap_Classify(
        text)
    intrinsic_merit_dict[
        "graph_tensor_neuron_network_intrinsic_merit"] = self.graph_tensor_neuron_network_intrinsic_merit
    intrinsic_merit_dict[
        "maximum_per_random_walk_graph_tensor_neuron_network_intrinsic_merit"] = maximum_per_random_walk_graph_tensor_neuron_network_intrinsic_merit
    intrinsic_merit_dict["korner_entropy"] = self.entropy
    intrinsic_merit_dict["density"] = density
    intrinsic_merit_dict[
        "bose_einstein_intrinsic_fitness"] = bose_einstein_intrinsic_fitness
    intrinsic_merit_dict[
        "recursive_gloss_overlap_intrinsic_merit"] = definitiongraph_merit[
            1]
    intrinsic_merit_dict["empath_sentiment"] = sentiment
    write_dot(definitiongraph, "RecursiveLambdaFunctionGrowth.dot")
    # Reset the running class-level merit accumulator for the next text.
    self.graph_tensor_neuron_network_intrinsic_merit = 1.0
    print "intrinsic_merit_dict:", intrinsic_merit_dict
    return intrinsic_merit_dict

def machine_translation(self, definitiongraph, languagecode):
    """Translate every edge of the definition graph with self.dictionary
    and print the resulting (bidirectional) translated digraph."""
    nodes = definitiongraph.nodes()
    edges = definitiongraph.edges()
    translationgraph = nx.DiGraph()
    for k, v in edges:
        ktrans = self.dictionary.translate(k, languagecode)
        vtrans = self.dictionary.translate(v, languagecode)
        print "k=", k, ",v=", v, ",ktrans=", ktrans, ",vtrans=", vtrans
        # Each translated edge is added in both directions.
        translationgraph.add_edge(ktrans, vtrans)
        translationgraph.add_edge(vtrans, ktrans)
    print "TextGraph Translated to ", languagecode, ":", translationgraph

#KornerEntropy(G) = minimum [- sum_v_in_V(G) {1/|V(G)| * log(Pr[v in Y])}] for each independent set Y
def korner_entropy(self, definitiongraph):
    """Approximate the Korner graph entropy of the definition graph over
    maximal independent sets seeded from each vertex; keeps the minimum in
    self.entropy and returns it."""
    nodes = definitiongraph.nodes()
    stable_sets = []
    # One maximal independent set per seed vertex.
    for v in nodes:
        stable_sets.append(
            nx.maximal_independent_set(definitiongraph.to_undirected(),
                                       [v]))
    print "korner_entropy(): Stable Independent Sets:", stable_sets
    entropy = 0.0
    prob_v_in_stableset = 0.0
    for v in nodes:
        for s in stable_sets:
            # Near-certain membership probability when v is in the stable
            # set, near-zero otherwise (log arguments avoid log(0)/log(1)).
            if v in s:
                prob_v_in_stableset = math.log(0.999999)
            else:
                prob_v_in_stableset = math.log(0.000001)
            entropy += (-1.0) * float(
                1.0 / len(nodes)) * prob_v_in_stableset
        # Track the minimum per-vertex entropy accumulated so far.
        if entropy < self.entropy:
            self.entropy = entropy
        entropy = 0.0
    return self.entropy

#Graph Density - Regularity Lemma
def density(self, definitiongraph):
    """Return the NetworkX density of the definition graph."""
    dty = nx.classes.function.density(definitiongraph)
    return dty

#Bose-Einstein Bianconi intrinsic fitness
def bose_einstein_intrinsic_fitness(self, definitiongraph):
    """Return the Bose-Einstein (Bianconi) intrinsic fitness 2^(-becf*E)
    where E is the Korner entropy of the definition graph."""
    #Bose-Einstein fitness presently assumes energy of a document vertex in a link graph to be
    #the entropy or extent of chaos in the definition graph of document text
    #This has to be replaced by a more suitable fitness measure
    #Bose-Einstein Condensation function value is hardcoded
    entropy = self.korner_entropy(definitiongraph)
    becf = 0.3
    bei_fitness = math.pow(2, -1 * becf * entropy)
    return bei_fitness