async def talk(ctx, *, question):
    """Answer *question* in the Discord channel behind *ctx*.

    Tries the intent-classification model first: if its confidence exceeds
    0.7, replies with a canned response for the predicted tag (the main.py
    prototype path). Otherwise falls back to the NMT Alanbot: every candidate
    answer is scored and one of the top-scoring answers is sent at random.

    NOTE(review): the original called inference_internal twice — the second
    call (`inference_internal(question)[0]`) re-ran the expensive inference
    only to overwrite `answers` with the same value; it has been removed.
    """
    # Run the (expensive) NMT inference exactly once and reuse both results.
    answers, answers_rate = inference_internal(question)

    conclusion = model.predict([bagOWords(question, words)])[0]
    conclusion_index = numpy.argmax(conclusion)
    tag = labels[conclusion_index]

    # execute the main.py prototype
    if conclusion[conclusion_index] > 0.7:
        # Guard against an unmatched tag: the original left `responses`
        # unbound (NameError) when no library entry carried the tag.
        responses = []
        for entry in data["Library"]:
            if entry['tag'] == tag:
                responses = entry['responses']
        print(conclusion[conclusion_index])
        if responses:
            # Two independent random picks, as in the original: the printed
            # response and the sent response may differ.
            print(random.choice(responses))
            await ctx.send(f'{random.choice(responses)}')
    # execute the NMT Alanbot
    else:
        ans_score = {}
        for i, answer in enumerate(answers):
            ans_score[answer] = scoring.do_scoring(question, answer, answers_rate[i])
        max_score = max(ans_score.values())
        options = [k for k, v in ans_score.items() if v == max_score]
        choice_answer = random.choice(options)
        print(choice_answer)
        await ctx.send(f'{choice_answer}')
def infer(question):
    """Return one best-scoring NMT answer for *question*.

    Each candidate's score is its inference rate plus the scoring-module
    score; ties for the maximum are broken uniformly at random.
    """
    answers, answers_rate = inference_internal(question)
    scored = {}
    for idx, candidate in enumerate(answers):
        scored[candidate] = answers_rate[idx] + scoring.do_scoring(question, candidate)
    best = max(scored.values())
    winners = [candidate for candidate, value in scored.items() if value == best]
    return random.choice(winners)
def mainFunction(questionIn):
    """Return a best-scoring NMT answer for the question string *questionIn*.

    Scores every candidate answer with the scoring module (using its
    inference rate), then picks at random among the answers that share the
    maximum score.
    """
    question = questionIn
    answers, answers_rate = inference_internal(question)
    ans_score = {
        answer: scoring.do_scoring(question, answer, answers_rate[i])
        for i, answer in enumerate(answers)
    }
    max_score = max(ans_score.values())
    options = [key for key, value in ans_score.items() if value == max_score]
    return random.choice(options)
answers = inference_helper(question) answers = detokenize(answers) answers = replace_in_answers(answers, 'answers') answers_rate = score_answers(answers, 'answers') return (answers, answers_rate) # interactive mode if __name__ == "__main__": print("\n\nStarting interactive mode (first response will take a while):") colorama.init() # QAs while True: question = input("\n> ") answers, answers_rate = inference_internal(question) ans_score = {} for i, answer in enumerate(answers): score = scoring.do_scoring(question, answer, answers_rate[i]) ans_score[answer] = score scores = [v for k, v in ans_score.items()] max_score = max(scores) options = [k for k, v in ans_score.items() if v == max_score] choice_answer = random.choice(options) print("{}- {}{}".format(colorama.Fore.GREEN, choice_answer, colorama.Fore.RESET)) # maybe print the others? Anything else with a matching highscore green, yellow mid-range... red lowest?
def infer(question):
    """Return a randomly chosen top-scoring answer for *question*.

    A candidate's score is its inference rate plus the scoring-module score;
    ties at the maximum are broken at random.
    """
    answers, answers_rate = inference_internal(question)
    ranked = {}
    for pos, candidate in enumerate(answers):
        ranked[candidate] = answers_rate[pos] + scoring.do_scoring(question, candidate)
    top = max(ranked.values())
    finalists = [c for c, s in ranked.items() if s == top]
    return random.choice(finalists)


if __name__ == "__main__":
    import colorama

    print("\n\nStarting interactive mode (first response will take a while):")
    colorama.init()
    # QAs — interactive loop: read a question, score every candidate answer,
    # and print one of the best in green.
    while True:
        question = input("\n> ")
        answers, answers_rate = inference_internal(question)
        scored = {
            answer: answers_rate[i] + scoring.do_scoring(question, answer)
            for i, answer in enumerate(answers)
        }
        high = max(scored.values())
        candidates = [k for k, v in scored.items() if v == high]
        print("{}- {}{}".format(colorama.Fore.GREEN, random.choice(candidates), colorama.Fore.RESET))