def create_passive(doc, sub_idx, root_idx, obj_index, obj_start, obj_end, negation_availability):
    """Print a passive-voice rendering of the parsed sentence *doc*.

    Args:
        doc: parsed sentence (spaCy-style Doc; tokens expose ``.lemma_``
            and slices stringify to text) -- assumed, confirm with caller.
        sub_idx: subject token index (currently unused; kept for
            signature compatibility with callers).
        root_idx: index of the root verb token.
        obj_index: index of the object's head noun (drives is/are choice).
        obj_start, obj_end: token span of the object phrase.
        negation_availability: True if the sentence is negated.

    Side effects: prints the passive sentence and a trailing blank line.
    """
    # VBN = past participle of the root verb, e.g. "eat" -> "eaten".
    participle = getInflection(doc[root_idx].lemma_, tag='VBN')[0]
    # inflect.singular_noun() returns False when the noun is already singular.
    copula = "is" if inflect.singular_noun(str(doc[obj_index])) is False else "are"
    if negation_availability:
        copula += " not"
    subject = str(doc[obj_start:obj_end])
    # 'obj_end + 2' checks whether the sentence continues past the object;
    # if it does, append the remainder, otherwise close with a full stop.
    if len(doc) > obj_end + 2:
        print(subject + " " + copula + " " + participle + " " + str(doc[obj_end:]))
    else:
        # Fixed: the original "are not" branch was missing the trailing
        # space and produced e.g. "are noteaten.".
        print(subject + " " + copula + " " + participle + ".")
    print(" ")
def testGetInflection(self):
    """Out-of-vocabulary behaviour of pyinflect.getInflection."""
    cases = [
        ('xxfocus', 'NN', False, None),
        ('xxfocus', 'NN', True, ('xxfocus', )),
        ('xxfocus', 'NNS', True, ('xxfocuses', 'xxfoci')),
        ('xxban', 'VBG', True, ('xxbaning', 'xxbanning')),
        ('xxdim', 'JJR', True, ('xxdimer', 'xxdimmer')),
    ]
    for lemma, tag, oov, expected in cases:
        self.assertEqual(
            pyinflect.getInflection(lemma, tag, inflect_oov=oov), expected)
def makethird(string, male=True):
    """Rewrite a first-person sentence into the third person.

    Drops the first 'i ', inflects the first recognizable verb to VBZ
    (3rd-person singular), and swaps the pronouns. Returns None when no
    word in the sentence has a VBZ inflection (implicit fall-through,
    as in the original).

    NOTE(review): the plain substring replaces ('am' -> 'is', 'my' ->
    'his') also hit words that merely contain those letters -- confirm
    callers only pass simple sentences.
    """
    string = string.replace('i ', '', 1)
    possessive = 'his' if male else 'her'
    subject_pronoun = ' he ' if male else ' she '
    for token in string.split(' '):
        third_person = pyinflect.getInflection(token, tag='VBZ')
        if third_person:
            rewritten = string.replace(token, third_person[0])
            rewritten = rewritten.replace('my', possessive)
            rewritten = rewritten.replace('mine', possessive)
            rewritten = rewritten.replace(' i ', subject_pronoun)
            return rewritten.replace('am', 'is')
def create_passive(doc, root_idx, obj_index, obj_start, obj_end, negation_availability):
    """Return a passive-voice rendering of the parsed sentence *doc*.

    Args:
        doc: parsed sentence (spaCy-style Doc; tokens expose ``.lemma_``
            and slices stringify to text) -- assumed, confirm with caller.
        root_idx: index of the root verb token.
        obj_index: index of the object's head noun (drives is/are choice).
        obj_start, obj_end: token span of the object phrase.
        negation_availability: True if the sentence is negated.

    Returns:
        The passive sentence, or ``str(doc)`` unchanged when inflection
        or parse data is unavailable.
    """
    print(doc)
    try:
        # VBN = past participle of the root verb, e.g. "eat" -> "eaten".
        participle = getInflection(doc[root_idx].lemma_, tag='VBN')[0]
        # inflect.singular_noun() returns False when the noun is singular.
        copula = "is" if inflect.singular_noun(str(doc[obj_index])) is False else "are"
        if negation_availability:
            copula += " not"
        subject = str(doc[obj_start:obj_end])
        # 'obj_end + 2' checks whether the sentence continues past the
        # object. If it does, keep the remainder space-separated...
        if len(doc) > obj_end + 2:
            return subject + " " + copula + " " + participle + " " + str(doc[obj_end:])
        # ...otherwise end with a full stop directly after the verb.
        # Fixed: the original "are not" branch here lacked a trailing
        # space and produced e.g. "are noteaten.".
        return subject + " " + copula + " " + participle + "."
    except Exception:
        # Narrowed from a bare 'except:'; best-effort fallback keeps the
        # original sentence instead of crashing.
        return str(doc)
def process(self, statement, additional_response_selection_parameters=None):
    """Score and answer a statement built around a 'dis...' word.

    Computes a heuristic confidence from the distilled word (the part
    after the 'dis' prefix) plus VADER negative sentiment, then picks a
    response/emotion depending on whether the statement targets 'you',
    'I', or a third party found in the parse.

    Returns:
        A SugaroidStatement carrying response, confidence and emotion.
    """
    confidence = 0
    dis_word = False
    # Ordinary words that merely start with 'dis' are not insults.
    if any_in(['distinguish', 'disfigure', 'distinct', 'distinction',
               'distant', 'distance', 'distribution', 'distilled'],
              self.normalized):
        confidence = 0
    else:
        # Fixed: the original indexed the *formatted message* with [0],
        # logging only its first character.
        logging.info(
            "DisAdapter: Starting Advanced scan. dis_word == {}".format(self.dis))
        dis_word = self.dis[3:]  # strip the 'dis' prefix
        logging.info("DisAdapter: Distilled word == {}".format(dis_word))
        sia = SentimentIntensityAnalyzer().polarity_scores(dis_word)
        # Heuristic letter/substring cues tuned for insult-like words.
        if dis_word[0] in ['a', 'e', 'i', 'o', 'u', 'g', 'm', 'p']:
            confidence += 0.4
        if 'infect' in dis_word:
            confidence -= 0.3
        if 'spirit' in dis_word:
            confidence += 0.2
        if any_in(['play', 'pensary', 'pense', 'patch', 'port', 'persal',
                   'perse', 'persion', 'praise'], dis_word):
            confidence -= 0.2
        confidence += sia['neg']
    # VBD = simple past; fall back to the raw word when uninflectable.
    inflection = getInflection(
        self.chatbot.lp.tokenize(self.dis)[0].lemma_, 'VBD')
    if inflection is None:
        past_participle_form_of_verb = self.dis
    else:
        past_participle_form_of_verb = inflection[0]
    if 'you' in self.normalized:
        response = random_response(DIS_RESPONSES_YOU).format(
            past_participle_form_of_verb)
        emotion = Emotion.angry_non_expressive
    elif 'I' in self.normalized:
        response = "{} {}".format(random_response(DIS_RESPONSES_I),
                                  random_response(CONSOLATION))
        emotion = Emotion.angel
    else:
        # Find a noun/proper noun (preferred) or pronoun to address.
        nn = None
        pn = None
        tokenized = spac_token(statement, chatbot=self.chatbot)
        for i in tokenized:
            if (i.pos_ == "NOUN") or (i.pos_ == 'PROPN'):
                nn = i.text
            elif i.pos_ == 'PRON':
                pn = i.text
        if not (nn or pn):
            response = 'Lol. What?'
            emotion = Emotion.seriously
        else:
            response = random_response(DIS_RESPONSES_HIM).format(nn or pn)
            emotion = Emotion.cry_overflow
    selected_statement = SugaroidStatement(response, chatbot=True)
    selected_statement.confidence = confidence
    selected_statement.emotion = emotion
    selected_statement.adapter = None
    return selected_statement
def testGetInflection03(self):
    """All inflections of an in-vocabulary word, with and without a POS filter."""
    noun_forms = {'NN': ('watch',), 'NNS': ('watches',)}
    verb_forms = {
        'VB': ('watch',), 'VBP': ('watch',), 'VBD': ('watched',),
        'VBN': ('watched',), 'VBG': ('watching',), 'VBZ': ('watches',),
    }
    self.assertEqual(pyinflect.getAllInflections('watch'),
                     {**noun_forms, **verb_forms})
    self.assertEqual(pyinflect.getAllInflections('watch', 'V'), verb_forms)
    self.assertEqual(pyinflect.getInflection('watch', 'VBD'), ('watched',))
    # 'watch' has no adjective inflections.
    self.assertEqual(pyinflect.getAllInflections('watch', 'A'), {})
def singular_sent(negation_availability, sentense, aux_idx, root_verb, base_verb): """conversion of singular sent """ if negation_availability: return str(sentense[:aux_idx]).strip() + " does " + str( sentense[aux_idx + 1:root_verb]).strip() + " " + base_verb + " " + str( sentense[root_verb + 1:]).strip() # VBZ - verb, 3rd person singular present return str(sentense[:aux_idx]).strip() + " " + str( sentense[aux_idx + 1:root_verb]).strip() + getInflection(base_verb, tag='VBZ')[0] + " " + str( sentense[root_verb + 1:]).strip()
def generate(N, j, word, pos, wordseq, replacements):
    """Build N variants of *wordseq*, each with token j replaced.

    For every alternative a replacement lemma is drawn at random from
    *replacements*, inflected to match the original token's xpos (unless
    it is a plain JJ/NN), adjusted for plurality/capitalization, and the
    preceding 'a'/'an' determiner is fixed up for vowel sounds.

    Args:
        N: number of alternatives to produce.
        j: index of the token to replace.
        word: original token (exposes .xpos and .text).
        pos: coarse POS; 'n' triggers plurality mimicry.
        wordseq: list of token strings.
        replacements: set of candidate replacement lemmas.

    Returns:
        List of N token lists.
    """
    assert isinstance(replacements, set)
    # random.sample() no longer accepts sets (Python 3.11+); draw from a
    # materialized sequence instead.
    candidates = tuple(replacements)
    alternatives = [wordseq[:] for _ in range(N)]
    for i in range(N):
        rep_lemma = random.choice(candidates)
        rep = str(rep_lemma)
        # Inflect to get the right form of the word (skip plain adjectives/nouns).
        if not (word.xpos == "JJ" or word.xpos == "NN"):
            inflected = getInflection(rep_lemma, tag=word.xpos)
            if inflected is not None:
                rep = inflected[0]
        # Mimic capitalization and plurality of the replaced token.
        rep_words = rep.split("_")
        if pos == 'n':
            rep_words[-1] = mimic_plurality(rep_words[-1], word.text)
        rep_words[0] = mimic_capitalization(rep_words[0], word.text)
        # Fix the form of the determiner, i.e., a or an
        last_index = j - 1
        if last_index >= 0:
            last_word = alternatives[i][last_index]
            if last_word.lower() == "a" or last_word.lower() == "an":
                article = "an" if starts_with_vowel_sound(rep_words[0].lower()) else "a"
                alternatives[i][last_index] = mimic_capitalization(article, last_word)
        alternatives[i][j] = " ".join(rep_words)
    return alternatives
def inflect_sing_plural(self, noun: str) -> "tuple[str, str]":
    """Inflect singular and plural forms for a noun.

    The manual override dict is consulted first; otherwise pyinflect is
    asked for the NN/NNS forms of the spaCy lemma.

    :param noun: noun to inflect
    :return: (singular, plural) of the noun
    :raises WordNotSupportedError: if pyinflect cannot inflect the lemma
    """
    # Fixed: the return annotation was '(str, str)', which evaluates to
    # a tuple *instance*, not a type.
    if noun in self.inflection_dict:
        return noun, self.inflection_dict[noun]
    lemma = LANG_MODEL(noun)[0].lemma_
    sing = getInflection(lemma, 'NN')
    plu = getInflection(lemma, 'NNS')
    if sing is None or plu is None:
        raise WordNotSupportedError(
            'Pyinflect could not inflect {}. Please add word '
            'to inflection dict.'.format(noun))
    return sing[0], plu[0]
def disambiguate_verb_kgtk() -> Any:
    """Disambiguates verbs from event description and return candidate qnodes.

    Looks up the verb lemma of the request's event description in KGTK
    (with an on-disk JSON cache), also querying the -ing participle when
    it differs, and returns the top-3 semantically similar candidates.

    Returns:
        A JSON response.
    """
    if not request.json:
        abort(HTTPStatus.BAD_REQUEST)
    event_description = request.json["event_description"]
    cleaned_description = event_description.replace("/", " ").replace("_", " ")
    event_verb = get_verb_lemma(nlp, cleaned_description)
    cached_file = KGTK_EVENT_CACHE / f"{event_verb}.json"
    if cached_file.is_file():
        with open(cached_file) as f:
            return json.load(f)
    kgtk_json = get_request_kgtk(event_verb)
    # getInflection returns a tuple of forms (or None). Fixed: the old
    # check compared the *tuple* against the lemma string, which is
    # always unequal, so the second query always ran.
    event_verb_participle = getInflection(event_verb, tag="VBG")
    if event_verb_participle and event_verb_participle[0] != event_verb:
        kgtk_json += get_request_kgtk(event_verb_participle[0])
    if not kgtk_json:
        return {"event_verb": kgtk_json, "options": []}
    unique_candidates = filter_duplicate_candidates(kgtk_json)
    options = []
    top3 = wikidata_topk(SS_MODEL, cleaned_description, unique_candidates, k=3)
    for candidate in top3:
        option = {
            "qnode": candidate["qnode"],
            "rawName": candidate["label"][0],
            "definition": candidate["description"][0],
        }
        if option not in options:
            options.append(option)
    response = {"event_verb": event_verb, "options": options}
    # Populate the cache for subsequent identical lookups.
    with open(cached_file, "w") as f:
        json.dump(response, f)
    return response
def testGetInflection04(self):
    """'watch' has no adjective forms but keeps its verb inflections."""
    self.assertEqual(pyinflect.getAllInflections('watch', 'A'), {})
    self.assertIsNone(pyinflect.getInflection('watch', 'JJ'))
    self.assertEqual(pyinflect.getInflection('watch', 'VBD'), ('watched',))
def testGetInflection02(self):
    """Singular and plural inflections of a regular noun."""
    cases = (('NN', ('squirrel', )), ('NNS', ('squirrels', 'squirrel')))
    for tag, expected in cases:
        self.assertEqual(pyinflect.getInflection('squirrel', tag), expected)
def singular_sent(sentence, root_verb, base_verb):
    """Convert a sentence to singular agreement.

    Replaces the token at *root_verb* with the 3rd-person singular
    present (VBZ) form of *base_verb*.
    """
    head = str(sentence[:root_verb]).strip()
    tail = str(sentence[root_verb + 1:]).strip()
    # get the singular present tense verb form
    third_person = getInflection(base_verb, tag='VBZ')[0]
    return head + " " + third_person + " " + tail
#!/usr/bin/python3
import sys

sys.path.insert(0, '..')  # make '..' first in the lib search path
from pyinflect import getAllInflections, getInflection


def _demo():
    """Print sample inflection lookups for 'be' and 'watch'."""
    for lemma in ('be',):
        print(getAllInflections(lemma))
        print(getInflection(lemma, tag='VBD'))
        print()
    print(getAllInflections('watch'))
    print(getAllInflections('watch', pos_type='V'))
    print(getInflection('watch', tag='VBD'))
    print()


if __name__ == '__main__':
    _demo()
def interact_model(
        #model_name='1558M',
        model_name='fanfic',
        seed=None,
        nsamples=1,
        batch_size=1,
        length=150,
        temperature=1,
        top_k=0,
):
    """Run the INFINITE ADVENTURE game loop on top of a GPT-2 model.

    Loads prompt templates (carrying.txt / items.txt / animate.txt), a
    GPT-2 tokenizer and a fine-tuned model from ./model_v5, then loops:
    restore a pickled save or interview the player for a new setting,
    generate room descriptions on demand, and dispatch on the first word
    (verb) of each typed command.

    NOTE(review): model_name / length / temperature / top_k are accepted
    but never read in this body -- presumably leftovers from the GPT-2
    sampling interface; confirm before removing.
    """
    os.environ['KMP_WARNINGS'] = 'off'
    inventory = set()
    if batch_size is None:
        batch_size = 1
    assert nsamples % batch_size == 0
    # Prompt templates used later to classify objects (carryable?),
    # list room contents, and decide whether something is animate.
    # NOTE: f.mode == 'r' is always true after these opens; the guard is
    # effectively a no-op.
    f = open("carrying.txt", "r", errors='ignore')
    if f.mode == 'r':
        carrying_prompt = f.read()
    f = open("items.txt", "r", errors='ignore')
    if f.mode == 'r':
        items_prompt = f.read()
    f = open("animate.txt", "r", errors='ignore')
    if f.mode == 'r':
        animate_prompt = f.read()
    tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
    model = GPT2LMHeadModel.from_pretrained("model_v5")
    while True:
        np.random.seed(seed)
        print("***")
        msg = (
            "\n\n\n\n\n\n\n\n\n\n\nINFINITE ADVENTURE\n\n\n\n"
            "INSTRUCTIONS: Infinite Adventure is primarily an exploration "
            "game. You can type anything you want at the prompt, as "
            "long as it starts with a verb. A few verbs have special effects:\n\n"
            " * go LOCATION -- takes you to that place. If the place already has a "
            "description, it appears down below. If not, you can still go "
            "there if it appears in the description but it takes a minute "
            "to generate the new description.\n\n"
            " * get OBJECT -- allows you to pick up some objects that appear in the "
            "description. If they are too big or can't be picked up, the "
            "system will tell you.\n\n"
            " * use OBJECT -- will only work with items in your inventory.\n\n"
            " * drop OBJECT -- drops an object from your inventory.\n\n"
            " * talk PERSON -- talk to a person in the current room. \n\n"
            " * inventory -- prints your inventory.\n\n"
            " * observe -- shows a list of items in the room. Useful if you "
            " want to pick something up not explicitly in the description.\n\n"
            " * fight -- asks opponent, what you want to do, and what weapon "
            "from your inventory (or fists, foot, etc...) you want to use.\n\n"
            " * save -- saves the game.\n\n"
            " * regenerate -- changes the description of the current room to a "
            "new one. This is mainly used for fixing descriptions that don't "
            "make sense.\n\n"
            " * quit -- quits the game.\n\n"
            "Each location records what has happened at that location and uses "
            "that record to decide what happens next. You can, however, bring "
            "objects from one location to another.\n\n\n\n")
        print(msg)
        input_load_flag = input("Would you like to load a game? (Y or N) >>>")
        if input_load_flag == "Y" or input_load_flag == "y":
            ### load game
            interactive_flag = 1
            print("please choose:")
            files = []
            for file in os.listdir("./"):
                if file.endswith(".pkl"):
                    print(file[:-4])
                    files.append(file)
            filename = ""
            # Keep asking until an existing save name is typed; the .pkl
            # suffix is appended automatically.
            while not (filename in files):
                filename = input("please check spelling >>> ")
                if filename[-4:] != ".pkl":
                    filename = filename + ".pkl"
            file = open("./" + filename, 'rb')
            data = pickle.load(file)
            descriptions = data[0]
            rooms = data[1]
            room_connections = data[2]
            input_persona = data[3]
            input_location = data[4]
            input_atmosphere = data[5]
            current_room = 0
            inventory = set()
            # Newer saves additionally store inventory and current room.
            if len(data) > 6:
                inventory = set(data[6])
                current_room = data[7]
        else:
            ### start a new game without loading
            precalculate = input(
                "Would you like to pregenerate room descriptions? It takes some time upfront, but gameplay is faster. (Y or N) >>> "
            )
            if precalculate in {"y", "Y", "yes"}:
                interactive_flag = 0
            else:
                interactive_flag = 1
            input_persona = input(
                "Describe your character in a few words. You are >>> ")
            input_location = input(
                "Describe where the adventure takes place in a few words.\n You would like to explore the >>> "
            )
            input_atmosphere = input(
                "Describe the feeling of the place in a few adjectives, separated by commas. The "
                + input_location + " is >>> ")
            # Strip a leading article so prompts read naturally.
            if input_location.startswith('the '):
                input_location = input_location[4:]
            if input_location.startswith('The '):
                input_location = input_location[4:]
            f = open("rooms.txt", "r", errors='ignore')
            if f.mode == 'r':
                contents = f.read()
            raw_text = contents + "\r\n" + input_atmosphere + " " + input_location + ":"
            print('generating places in the ' + input_location + '...')
            rooms = []
            # Sample several batches of candidate room names from the model.
            for _ in range(4 // batch_size):
                print("*", end=" ")
                text = generate(tokenizer, model, raw_text, 70, 30)
                rooms = rooms + rooms_cleanup(text)
            #remove duplicates from the list of rooms
            set_rooms = set(rooms)
            rooms = list(set_rooms)
            print(rooms)
            room_connections = create_graph(len(rooms), len(rooms) * 3)
            print(room_connections)
            descriptions = ['' for i in range(0, len(rooms) + 1)]
            current_room = 0
        generate_more_flag = 0
        describe_flag = 1
        while True:
            ### main loop
            # Lazily generate a description the first time a room is entered.
            if "".__eq__(descriptions[current_room]):
                description_prompt = 'The following excerpt from a novel is a long and detailed description of the ' + input_atmosphere + ' things found in the ' + rooms[current_room] + ':\nYou were ' + input_persona + '. You were in the ' + rooms[current_room] + ' within the ' + input_location + '. Here is what you saw there:'
                print("running description generator")
                text = generate(tokenizer, model, description_prompt, 120, 30)
                descriptions[current_room] = description_cleanup(text)
            if describe_flag == 1:
                print("\n" + rooms[current_room] + "\n")
                wrap_print(descriptions[current_room])
                print("\n other nearby areas:", end=" ")
                describe_flag = 0
                for index in room_connections[current_room]:
                    print(rooms[index], end=" | ")
            if interactive_flag == 0:
                # Pregeneration mode: autosave after every generated room.
                filename = './' + input_location + '.pkl'
                afile = open(filename, 'wb')
                pickle_file = (descriptions, rooms, room_connections,
                               input_persona, input_location,
                               input_atmosphere, list(inventory),
                               current_room)
                pickle.dump(pickle_file, afile)
                afile.close()
                describe_flag = 1
                print("\n saved game.")
            if interactive_flag == 1:
                next_command = input("\n >>>")
                # Command shape: VERB [rest-of-line-as-object].
                next_command_split = next_command.split(" ", 1)
                next_verb = next_command_split[0]
                next_object = 'none'
                if len(next_command_split) > 1:
                    next_object = next_command_split[1]
                # VBD = past tense, used when narrating free-form actions.
                next_verb_past = getInflection(next_verb.strip(",.:-"),
                                               tag='VBD')
                if next_verb_past is None:
                    # Not a recognizable verb: treat as a bad "go" so the
                    # player gets the standard error message below.
                    next_verb_past = "went"
                    next_verb = "go"
                    next_object = "ERROR"
                else:
                    next_verb_past = next_verb_past[0]
                ### verb handling
                if next_verb == "go":
                    describe_flag = 0
                    # Known room name? Jump straight there.
                    for i in range(0, len(rooms)):
                        if rooms[i].lower() == next_object.lower():
                            current_room = i
                            describe_flag = 1
                    if describe_flag == 0:
                        # New place mentioned in the current description:
                        # create it and connect it to this room.
                        if next_object in descriptions[current_room]:
                            rooms.append(next_object)
                            room_list_length = len(rooms)
                            room_connections.append({current_room})
                            room_connections[current_room].add(
                                room_list_length - 1)
                            current_room = room_list_length - 1
                            descriptions.append("")
                            describe_flag = 1
                        else:
                            wrap_print(
                                "Sorry, I don't recognize that. Please choose a place using a word from the description."
                            )
                elif next_verb in {"get", "grab"}:
                    if next_object in inventory:
                        print("You already have that.")
                    elif next_object in descriptions[current_room]:
                        # Ask the model whether the object is carryable.
                        carrying_generator = carrying_prompt + "\n" + next_object + ":"
                        print("checking to see if you can get the object... ")
                        text = generate(tokenizer, model, carrying_generator,
                                        10, 1)
                        # NOTE(review): answer[1] assumes the model output
                        # has at least two space-separated words.
                        answer = text.split(" ")
                        if answer[1] == "okay":
                            print("You pick up the " + next_object + ".")
                            descriptions[current_room] = descriptions[
                                current_room] + "You picked up the " + next_object + ". "
                            inventory.add(next_object)
                            print("inventory: ")
                            print(inventory)
                        elif answer[1] == "too":
                            print("that's too big to carry.")
                        else:
                            print("I don't know how to do that.")
                    else:
                        wrap_print(
                            "Sorry, I don't recognize that. Please choose an object to get using a word from the description."
                        )
                elif next_verb == "save":
                    filename = './' + input_location + '.pkl'
                    afile = open(filename, 'wb')
                    pickle_file = (descriptions, rooms, room_connections,
                                   input_persona, input_location,
                                   input_atmosphere, inventory, current_room)
                    pickle.dump(pickle_file, afile)
                    afile.close()
                    print("\n saved game.")
                elif next_verb == "observe":
                    print(
                        "generating list of some objects found in this room..."
                    )
                    items = []
                    # NOTE(review): '"of"' lacks surrounding spaces in the
                    # prompt -- confirm intended.
                    for _ in range(3 // batch_size):
                        text = generate(
                            tokenizer, model, items_prompt + "\n" +
                            rooms[current_room] + "of" + input_location + ":",
                            40, 30)
                        items = items + rooms_cleanup(text)
                    #remove duplicates from the list of items
                    set_items = set(items)
                    items = list(set_items)
                    comma_separated = ', '.join(items)
                    outtext = "\nThe following objects are also in the room: " + comma_separated
                    wrap_print(outtext)
                    descriptions[
                        current_room] = descriptions[current_room] + outtext
                elif next_verb == "talk":
                    partner = next_object
                    if partner in descriptions[current_room]:
                        # First check the partner is animate.
                        is_animate = animate_prompt + "\n" + partner + ":"
                        text = generate(tokenizer, model, is_animate, 4, 1)
                        animate_split = text.split("\n", 1)
                        if animate_split[0] == " inanimate":
                            print("The " + partner + " just sit/s there.\n")
                            continue_chat = "n"
                        else:
                            print("Talking to " + next_object)
                            continue_chat = "y"
                            # The NPC's goal: persuade the player to visit
                            # a random room (gives the chat a direction).
                            random_room = random.choice(rooms)
                            full_talk_prompt = 'If the ' + partner + ' wants ' + input_persona + ' to go to the library, the ' + partner + ' might say, "You really need to the library." If ' + input_persona + ' is supposed to go to the beach, the ' + partner + ' could say, "It is important for you to find the beach." If ' + input_persona + ' has to go to the ' + random_room + ', the ' + partner + ' would say something like, "'
                            text = generate(tokenizer, model,
                                            full_talk_prompt, 50, 30)
                            split_text = text.split('"')
                            response = split_text[0]
                            full_talk_prompt = partner + ' says, "' + response + '"\n'
                            wrap_print(full_talk_prompt)
                        while continue_chat == "y":
                            you_say = input(
                                "What do you say? (Just press enter to quit chat mode.) >>>"
                            )
                            if you_say == '':
                                continue_chat = "n"
                                descriptions[current_room] = descriptions[
                                    current_room] + '\nYou spoke with ' + partner + '.\n'
                            else:
                                talk_prompt = full_talk_prompt + input_persona + ' says, "' + you_say + '"\n' + partner + ', still trying to persuade ' + input_persona + ' to go to the ' + random_room + ' says: "'
                                #print("talk_prompt = " + talk_prompt)
                                text = generate(tokenizer, model, talk_prompt,
                                                50, 30)
                                #print("text =" + text)
                                split_text = text.split('"')
                                #print(split_text)
                                response = split_text[0]
                                printed = partner + ' says, "' + response + '"'
                                wrap_print(printed)
                                full_talk_prompt = talk_prompt + response + '"\n'
                                #print("full_talk_prompt = " + full_talk_prompt)
                    else:
                        print(
                            'try "talk PERSON" where "PERSON" is in the room description.'
                        )
                elif next_verb == "regenerate":
                    # Clearing the description makes the top of the main
                    # loop regenerate it.
                    descriptions[current_room] = ''
                    print("\n regenerating room description...")
                    describe_flag = 1
                elif next_verb == "drop":
                    if next_object in inventory:
                        print("You drop the " + next_object + ".")
                        inventory.remove(next_object)
                        descriptions[current_room] = descriptions[
                            current_room] + "You dropped the " + next_object + " here. "
                    else:
                        print("That item isn't in your inventory.")
                elif next_verb == "use":
                    for item in inventory:
                        if next_object.startswith(item):
                            print("you use the " + next_object + ".")
                            descriptions[current_room] = descriptions[
                                current_room] + "You used the " + next_object + ". "
                            # Let the free-form narrator below continue
                            # the story of using the item.
                            generate_more_flag = 1
                    if generate_more_flag == 0:
                        print("You don't have that in your inventory.")
                elif next_verb in {"fight", "punch", "stab", "attack", "kill"}:
                    #check if the enemy is in the description
                    # hp[1] is the enemy's hitpoints, hp[2] the player's
                    # (see the prints below); hp[0] is unused padding.
                    hp = [0, 10, 10]
                    enemy = input("opponent >>>")
                    if enemy in descriptions[current_room]:
                        continue_fight = "y"
                    else:
                        continue_fight = "n"
                        print(
                            "That opponent doesn't appear in the room description."
                        )
                    if continue_fight == "y":
                        is_animate = animate_prompt + "\n" + enemy + ":"
                        text = generate(tokenizer, model, is_animate, 4, 1)
                        animate_split = text.split("\n", 1)
                        if animate_split[0] == " inanimate":
                            print("The " + enemy + " just sit/s there.\n")
                            continue_fight = "n"
                    #start the "continue fight" loop
                    weapon = 'fists'
                    while continue_fight == "y":
                        #get the action
                        raw_action = input("action (e.g. stab) >>> ")
                        action_split = raw_action.split(" ", 1)
                        action_present = action_split[0]
                        # Narrate in past tense; fall back to "hit".
                        action = getInflection(action_present, tag='VBD')
                        if action is None:
                            action = "hit"
                        else:
                            if action[0] is None:
                                action = "hit"
                            else:
                                action = action[0]
                        if action in {
                                "quit", "stopped", "surrendered", "hid",
                                "escaped", "ran", "fled"
                        }:
                            print("You got away.")
                            continue_fight = "n"
                            break
                        #get the weapon and check it is in inventory
                        new_weapon = input(
                            "weapon (or press enter to use the same weapon) >>> "
                        )
                        if new_weapon == '':
                            pass
                        else:
                            weapon = new_weapon
                        # Body parts always count as available weapons.
                        possible_weapons = inventory.union({
                            "fists", "fist", "knee", "foot", "elbow", "head",
                            "forehead", "finger", "fingers", "teeth", "voice",
                            "hands", "hand", "feet", "knees", "elbows"
                        })
                        # Allow a suffix to match, e.g. "sword" for
                        # "rusty sword".
                        for possible in possible_weapons:
                            if possible.endswith(weapon):
                                weapon = possible
                        if weapon in possible_weapons:
                            #generate response
                            start_sentence = "You " + action + " " + enemy + " with your " + weapon
                            # Few-shot combat-narration prompt seeded with
                            # example exchange lines.
                            prompt = "You are " + input_persona + ". Your adversary, " + enemy + ", faced off agaist you. You attacked with a mighty stroke, slicing " + enemy + "'s arm.\n" + enemy + " fought back, wounding your shoulder.\n You pressed your attack, wounding " + enemy + "'s leg.\n" + enemy + " tried again, but missed.\nYou pressed forward and took a mighty swing, but " + enemy + " escaped.\n" + enemy + " charged, dealing a heavy wound.\nYou managed to deal a nearly fatal blow, almost killing " + enemy + ".\n" + enemy + " let loose an onslaught, lightly wounding your arm.\nYou struck, but " + enemy + " got away unscathed.\n" + enemy + " retaliated with a barrage, doing heavy damage.\nYou fought back, rushing " + enemy + " and knocking " + enemy + " to the ground.\nYou rallied and caught " + enemy + " offguard.\n" + enemy + " blocked and returned the attack with a vicious strike.\nYou managed to get past " + enemy + "'s defenses and dealt a wound.\n" + enemy + " lunged, but missed by a mile.\nYou feinted to the left and struck to the right, but missed doing any damage.\n" + enemy + " knocked you off your feet with a heavy blow.\nYou fired away, successfully penetrating " + enemy + "'s defense.\n" + start_sentence
                            text = generate(tokenizer, model, prompt, 40, 30)
                            text = other_cleanup(text)
                            text = start_sentence + text
                            sentences = text.split("\n")
                            paragraph_length = len(sentences)
                            # Keep at most the first three narrated lines.
                            sentences = sentences[0:min(paragraph_length, 3)]
                            # NOTE(review): removing from the list being
                            # iterated can skip elements -- confirm.
                            for sentence in sentences:
                                sentence = sentence.strip()
                                if sentence == '':
                                    sentences.remove(sentence)
                            # parse the response
                            for sentence in sentences:
                                #default is that no one is damaged
                                damaged = 0
                                # if the sentence starts with "you", then the enemy is the one damaged
                                yous = {'You', 'you'}
                                for term in yous:
                                    if sentence.startswith(term):
                                        damaged = 1
                                for term in yous:
                                    # if the word "you" occurs somewhere else in the sentence, the player is the one damaged
                                    if term in sentence:
                                        if damaged == 0:
                                            damaged = 2
                                wases = {'was', 'were'}
                                swapflag = 0
                                # if the sentence is a passive sentence, then who is damaged gets swapped
                                for term in wases:
                                    if term in sentence:
                                        swapflag = 1
                                if swapflag == 1:
                                    if damaged == 2:
                                        damaged = 1
                                    else:
                                        damaged = 2
                                # damage_flag=1
                                # if someone is killed, the damage brings hitpoints to zero
                                kills = {
                                    "kills", "killed", "slay", "slayed",
                                    "slays"
                                }
                                for term in kills:
                                    if term in sentence:
                                        hp[damaged] = 0
                                        continue_fight = "n"
                                you_die = {
                                    "you die", "you are killed",
                                    "you are slain", "you are dead",
                                    "You die", "You are killed",
                                    "You are slain", "You are dead"
                                }
                                for term in you_die:
                                    if term in sentence:
                                        hp[2] = 0
                                        continue_fight = "n"
                                dies = {"dies", "died", "die"}
                                for term in dies:
                                    if term in sentence:
                                        if hp[2] > 0:
                                            hp[1] = 0
                                            continue_fight = "n"
                                #if a miss is mentioned, assume no damage was done
                                misses = {
                                    "escape", "escaped", "escapes", "try",
                                    "tried", "tries", "miss", "missing",
                                    "missed", "misses", "dodge", "dodges",
                                    "dodged", "dodging", "block", "blocks",
                                    "blocked", "blocking", "save", "saved",
                                    "saving"
                                }
                                for term in misses:
                                    if term in sentence:
                                        damaged = 0
                                #if the player or enemy are damaged, subtract one from their hitpoints
                                if damaged in {1, 2}:
                                    hp[damaged] = hp[damaged] - 1
                                print(sentence)
                                print("enemy hp:", end=" ")
                                print(hp[1])
                                print("your hp:", end=" ")
                                print(hp[2])
                                if hp[1] < 1:
                                    print("ENEMY KILLED")
                                    continue_fight = "n"
                                    descriptions[current_room] = descriptions[
                                        current_room] + "\nOn the ground you see the remains of the " + enemy + "."
                                    break
                                if hp[2] < 1:
                                    print(
                                        "YOU WERE KILLED (but we'll pretend you weren't so you can keep playing if you want)"
                                    )
                                    continue_fight = "n"
                                    break
                        else:
                            print(
                                "You don't seem to have that weapon in your inventory."
                            )
                elif next_verb == "inventory":
                    print("inventory:")
                    print(inventory)
                elif next_verb == "quit":
                    raise SystemExit
                else:
                    generate_more_flag = 1
                #other verbs
                # Free-form narration for any unrecognized verb (and for
                # successful "use" commands).
                if generate_more_flag == 1:
                    generate_more_flag = 0
                    prompt = descriptions[
                        current_room] + '\nYou ' + next_verb_past + " " + next_object
                    text = generate(tokenizer, model, prompt, 50, 30)
                    text = other_cleanup(text)
                    wrapped = '\nYou ' + next_verb_past + " " + next_object + text
                    wrap_print(wrapped)
                    descriptions[current_room] = prompt + text
                    next_verb = ""
            ### this steps through all the rooms generating them
            if interactive_flag == 0:
                current_room = current_room + 1
                if current_room > len(rooms) - 1:
                    current_room = 0
                    print(
                        "\n\n\n\n***** Pregeneration finished. Have fun! *****\n\n\n\n"
                    )
                    interactive_flag = 1
def process(self, statement, additional_response_selection_parameters=None):
    """Score and answer a statement built around a 'dis...' word.

    Computes a heuristic confidence from the distilled word (the part
    after the 'dis' prefix) plus VADER negative sentiment, then picks a
    response/emotion depending on whether the statement targets 'you',
    'I', or a third party found in the parse.

    Returns:
        A SugaroidStatement carrying response, confidence and emotion.
    """
    confidence = 0
    dis_word = False
    # Ordinary words that merely start with 'dis' are not insults.
    if any_in(
        [
            "distinguish",
            "disfigure",
            "distinct",
            "distinction",
            "distant",
            "distance",
            "distribution",
            "distilled",
        ],
            self.normalized,
    ):
        confidence = 0
    else:
        # Fixed: the original indexed the *formatted message* with [0],
        # logging only its first character.
        logging.info(
            "DisAdapter: Starting Advanced scan. dis_word == {}".format(
                self.dis))
        dis_word = self.dis[3:]  # strip the 'dis' prefix
        logging.info("DisAdapter: Distilled word == {}".format(dis_word))
        sia = SentimentIntensityAnalyzer().polarity_scores(dis_word)
        # Heuristic letter/substring cues tuned for insult-like words.
        if dis_word[0] in ["a", "e", "i", "o", "u", "g", "m", "p"]:
            confidence += 0.4
        if "infect" in dis_word:
            confidence -= 0.3
        if "spirit" in dis_word:
            confidence += 0.2
        if any_in(
            [
                "play",
                "pensary",
                "pense",
                "patch",
                "port",
                "persal",
                "perse",
                "persion",
                "praise",
            ],
                dis_word,
        ):
            confidence -= 0.2
        confidence += sia["neg"]
    # VBD = simple past; fall back to the raw word when uninflectable.
    inflection = getInflection(
        self.chatbot.lp.tokenize(self.dis)[0].lemma_, "VBD")
    if inflection is None:
        past_participle_form_of_verb = self.dis
    else:
        past_participle_form_of_verb = inflection[0]
    if "you" in self.normalized:
        response = random_response(DIS_RESPONSES_YOU).format(
            past_participle_form_of_verb)
        emotion = Emotion.angry_non_expressive
    elif "I" in self.normalized:
        response = "{} {}".format(random_response(DIS_RESPONSES_I),
                                  random_response(CONSOLATION))
        emotion = Emotion.angel
    else:
        # Find a noun/proper noun (preferred) or pronoun to address.
        nn = None
        pn = None
        tokenized = spac_token(statement, chatbot=self.chatbot)
        for i in tokenized:
            if (i.pos_ == "NOUN") or (i.pos_ == "PROPN"):
                nn = i.text
            elif i.pos_ == "PRON":
                pn = i.text
        if not (nn or pn):
            response = "Lol. What?"
            emotion = Emotion.seriously
        else:
            response = random_response(DIS_RESPONSES_HIM).format(nn or pn)
            emotion = Emotion.cry_overflow
    selected_statement = SugaroidStatement(response, chatbot=True)
    selected_statement.confidence = confidence
    selected_statement.emotion = emotion
    selected_statement.adapter = None
    return selected_statement
if len(mappings) == 1: best_infl_word = mappings[0][ 0] # mappings is a list of (word, count) best_infl_count = mappings[0][1] elif len(mappings) > 1: # Choose the one with the highest count. If equal, choose alphabetically. # Note that counts are rarely equal when using the entire corpus. This mostly occurs # for mispellings that only show-up once and these will get filtered out by "req_count". # So, we won't be too concerned if alphabetical isn't the perfect fall-back heuristic. mappings = sorted(mappings, key=lambda x: x[0]) # sort alphabetically mappings = sorted(mappings, key=lambda x: x[1], reverse=True) # sort highest count first best_infl_word = mappings[0][0] best_infl_count = mappings[0][1] # Write out for info / debug multiples_f.write(' %s/%s -> %s\n' % (lemma, tag, str(mappings))) # Skip overrides for cases where there's only a few instances in the corpus if best_infl_count < req_count: continue # Now that we know what we want the lemma/tag to inflect to, check with pyinflect to see # what it's actually doing and if it's different, write an override. infl_list = pyinflect.getInflection(lemma, tag) infl = infl_list[0] if infl_list else '' # choose form 0, the default if infl != best_infl_word: overrides_f.write('%s,%s,%s\n' % (lemma, tag, best_infl_word)) multiples_f.close() overrides_f.close() print('Overrides file saved to: ', overrides_fn) print('Multiple entries saved to: ', multiples_fn)
def inflect_verb(self, verb, tag='VBG'):
    """Return the inflection of *verb* for the given Penn tag.

    :param verb: verb lemma to inflect
    :param tag: Penn Treebank tag (default 'VBG', present participle)
    :raises ValueError: if pyinflect cannot inflect the verb
    """
    try:
        return pyinflect.getInflection(verb, tag=tag)[0]
    except Exception as err:
        # Fixed: the original did `raise '...'` -- raising a plain
        # string is a TypeError in Python 3 (exceptions must derive
        # from BaseException).
        raise ValueError(
            'Couldn\'t inflect verb "{}"'.format(verb)) from err
def informal_word_detection(self, sent_list):
    """Detect informal words in each sentence and replace them with their
    formal equivalents, then hand the sentences on to tense conversion.

    Single-word verbs from the informal list are matched by lemma (so any
    tense of the verb is caught) and re-inflected after replacement;
    multi-word entries are matched as literal phrases and swapped directly.

    :param sent_list: list of sentence strings; mutated in place
    """
    # Punctuation set used to decide whether to insert a space after a
    # replacement.
    punctuation_list = string.punctuation
    # Two matchers: rule-based (lemma) for single verbs, phrase-based for
    # multi-word informal expressions.
    matcher_rule = Matcher(nlp.vocab)
    matcher_phrase = PhraseMatcher(nlp.vocab)
    # Penn Treebank verb tags.
    verb_types = ["VB", "VBD", "VBG", "VBN", "VBP", "VBZ"]
    # Load the informal word list. NOTE(review): informal_word_list[i] and
    # formal_word_list[i] are assumed to be aligned line-for-line — confirm
    # the two files stay in sync.
    with open('Model/informal_word_list.txt', 'r') as file:
        informal_word_list = ["" + line.strip() + "" for line in file]
    # Load the parallel formal word list.
    with open('Model/formal_word_list.txt', 'r') as file:
        formal_word_list = ["" + line.strip() + "" for line in file]
    phrase_list = list()
    for i in range(len(informal_word_list)):
        try:
            # Get the next informal word.
            word = informal_word_list[i]
            # Single-token verbs go to the lemma matcher so every tense of
            # the verb is matched.
            if len(word.split()) == 1 and str(
                    nlp(word)[0].tag_) in verb_types:
                # Match the base verb, optionally followed by punctuation.
                # The rule id is the list index (as a string) — the index
                # is recovered later via the lemma lookup, not the id.
                pattern = [{'LEMMA': word}, {'IS_PUNCT': True, 'OP': '?'}]
                matcher_rule.add(str(i), None, pattern)
            else:
                # Everything else is formalized with direct phrase matching.
                phrase_list.append(word)
        except Exception:
            # Best-effort: skip entries spaCy can't process.
            continue
    # Tokenize the phrases once and register them all under one label.
    phrase_patterns = [nlp(text) for text in phrase_list]
    matcher_phrase.add('Informal word matcher', None, *phrase_patterns)
    for i in range(len(sent_list)):
        # Tokenize the sentence.
        sentense = nlp(sent_list[i])
        # Collect matches from both techniques.
        matches_1 = matcher_rule(sentense)
        matches_2 = matcher_phrase(sentense)
        # Unite the two match lists.
        matches = matches_1 + matches_2
        # Sort the matches by start position in the original sentence so
        # the replacement walk below is left-to-right and overlapping
        # matches don't tangle.
        matches.sort(key=lambda x: x[1])
        if len(matches) != 0:
            try:
                new_sent = ""  # rebuilt sentence accumulates here
                previous_end = None  # end token index of the last match handled
                for match in matches:
                    # The matched informal span as text.
                    informal_word = str(sentense[match[1]:match[2]])
                    # Penn tag of the first token — used to re-inflect the
                    # formal replacement for single-verb matches.
                    word_type = str(sentense[match[1]:match[2]][0].tag_)
                    # The informal list holds base forms; an inflected verb
                    # (e.g. past tense) won't appear verbatim, so map back
                    # through its lemma and re-inflect the formal word to
                    # the same tense.
                    if not informal_word_list.__contains__(
                            informal_word) and word_type in verb_types:
                        index = informal_word_list.index(
                            sentense[match[1]:match[2]][0].lemma_)
                        # Inflect the formal replacement to the detected tag.
                        formal_word = getInflection(
                            formal_word_list[index], tag=str(word_type))[0]
                    else:
                        # Phrase (or exact base-form) match: direct swap via
                        # the parallel lists.
                        index = informal_word_list.index(informal_word)
                        formal_word = formal_word_list[index]
                    if previous_end is None:
                        # First match: copy everything before it, then the
                        # replacement.
                        new_sent = new_sent + str(
                            sentense[:match[1]]).strip() + " " + formal_word
                        # If the next token isn't punctuation, keep a space.
                        if len(sentense) != match[2] and str(sentense[
                                match[2]]) not in punctuation_list:
                            new_sent = new_sent + " "
                            previous_end = match[2]
                        else:
                            previous_end = match[2]
                    else:
                        # Continuation: copy the gap since the last match,
                        # then the replacement.
                        new_sent = new_sent + str(
                            sentense[previous_end:match[1]]).strip(
                            ) + " " + formal_word
                        # Same spacing rule as above.
                        if len(sentense) != match[2] and str(sentense[
                                match[2]]) not in punctuation_list:
                            new_sent = new_sent + " "
                            previous_end = match[2]
                        else:
                            previous_end = match[2]
                # Append the tail of the sentence after the last match.
                new_sent = new_sent + str(sentense[previous_end:]).strip()
                sent_list[i] = new_sent.strip()
            except Exception:
                # Best-effort: on any failure keep the sentence unchanged.
                sent_list[i] = str(sentense)
    # for sent in sent_list:
    #     print(sent)
    self.tense_conversion_obj.future_tense_det(sent_list)
def get_gerund_word(self, word):
    """Return the gerund (VBG) form of *word*.

    :param word: the word to inflect
    :return: a one-element list ``[gerund]`` when pyinflect has an
        inflection, otherwise the empty string ``''``. NOTE: the two
        branches deliberately return different types; callers rely on
        this asymmetry.
    """
    inflections = getInflection(word, 'VBG')
    if inflections is None:
        return ''
    return [inflections[0]]
def inflect(word, target):
    """Inflect *word* to the Penn Treebank tag *target*.

    With ``inflect_oov=True`` pyinflect falls back to rule-based
    inflection for out-of-vocabulary words; if it still returns ``None``
    the word is returned unchanged.

    :param word: the word (lemma) to inflect
    :param target: Penn Treebank tag to inflect to
    :return: the first inflected form, or *word* itself when unavailable
    """
    inflected = getInflection(word, target, inflect_oov=True)
    # PEP 8: compare against None with identity, not equality.
    return inflected[0] if inflected is not None else word