def setup_interactive(shared):
    """
    Build and parse CLI opts, load the model, and populate ``SHARED``.

    Returns the fully-resolved options of the loaded agent.
    NOTE(review): the ``shared`` parameter is unused; all state lives in the
    module-level ``SHARED`` dict.
    """
    parser = setup_args()
    parser.add_argument('--port', type=int, default=PORT, help='Port to listen on.')
    parser.add_argument(
        '--host',
        default=HOST_NAME,
        type=str,
        help='Host from which allow requests, use 0.0.0.0 to allow all IPs',
    )
    opt = parser.parse_args(print_args=False)
    opt['task'] = 'parlai.agents.local_human.local_human:LocalHumanAgent'
    SHARED['opt'] = opt

    # Build the model agent and the world it converses in.
    model_agent = create_agent(opt, requireModelExists=True)
    SHARED['agent'] = model_agent
    SHARED['world'] = create_task(opt, model_agent)

    # Show args only after the model has been loaded.
    parser.opt = model_agent.opt
    parser.print_args()
    return model_agent.opt
def setup_interactive():
    """
    Set up the interactive script.

    Parses CLI options, defaults the fixed-candidates file to a
    ``candidates.txt`` sitting next to the model file, forces CPU execution,
    and populates the module-level ``SHARED`` dict with the opt, image
    loader, agent, world, and an empty dialog history.

    Raises:
        RuntimeError: if no model file was supplied on the command line.
    """
    parser = setup_args()
    opt = parser.parse_args(print_args=True)
    if not opt.get("model_file"):
        raise RuntimeError("Please specify a model file")
    if opt.get("fixed_cands_path") is None:
        # os.path.dirname replaces the old manual '/'-split/join dance.
        fcp = os.path.join(os.path.dirname(opt["model_file"]), "candidates.txt")
        opt["fixed_cands_path"] = fcp
        opt["override"]["fixed_cands_path"] = fcp
    opt["task"] = "parlai.agents.local_human.local_human:LocalHumanAgent"
    opt["image_mode"] = "resnet152"
    # Force CPU: the serving host is not assumed to have a GPU.
    opt["no_cuda"] = True
    opt["override"]["no_cuda"] = True

    SHARED["opt"] = opt
    SHARED["image_loader"] = ImageLoader(opt)
    # Create model and assign it to the specified task
    SHARED["agent"] = create_agent(opt, requireModelExists=True)
    SHARED["world"] = create_task(opt, SHARED["agent"])
    # Dialog History
    SHARED["dialog_history"] = []
def __init__(self, config):
    """
    Load the emoji model and the transformer ranker agent.

    NOTE(review): the transformer model file is expected at
    ``<checkpoint minus trailing '1.pkl'>/transformer`` — confirm the
    checkpoint naming convention against the training script.
    """
    self.botmoji = Botmoji()
    parser = setup_args()
    # Strip the trailing '1.pkl' from the checkpoint path and point at the
    # transformer model stored alongside it.
    model_path = os.path.join(
        config.checkpoint[:-len('1.pkl')] + '/transformer'
    )
    opt = parser.parse_args(['-mf', model_path])
    self.transformer = create_agent(opt, requireModelExists=True)
def setup_interactive(shared):
    """
    Parse CLI options, build the agent and its world, and store them in
    the module-level ``SHARED`` dict (the ``shared`` parameter is unused).
    """
    parser = setup_args()
    opt = parser.parse_args(print_args=True)
    opt['task'] = 'parlai.agents.local_human.local_human:LocalHumanAgent'
    SHARED['opt'] = opt
    # Create model and assign it to the specified task
    SHARED['agent'] = create_agent(opt, requireModelExists=True)
    SHARED['world'] = create_task(opt, SHARED['agent'])
def _run_test_repeat(self, tmpdir: str, fake_input: FakeInput):
    """Run the repeat_query model interactively and verify the saved log."""
    outfile = os.path.join(tmpdir, 'log.jsonl')
    parser = interactive.setup_args()
    opt = parser.parse_args(['-m', 'repeat_query', '--outfile', outfile])
    interactive.interactive(opt)

    # Every scripted episode must appear in the log, with two utterances
    # (human + model) per turn.
    log = conversations.Conversations(outfile)
    self.assertEqual(len(log), fake_input.max_episodes)
    for episode in log:
        self.assertEqual(len(episode), 2 * fake_input.max_turns)
def setup_interweb_args(shared):
    """
    Build and parse CLI opts.

    Extends the standard interactive parser with web-server options and
    returns the parser itself (nothing is parsed here).
    """
    parser = setup_args()
    parser.add_argument(
        '--port', type=int, default=PORT, help='Port to listen on.'
    )
    parser.add_argument(
        '--host',
        default=HOST_NAME,
        type=str,
        help='Host from which allow requests, use 0.0.0.0 to allow all IPs',
    )
    return parser
def setup_interactive():
    """
    Set up the interactive script.

    Parses CLI options, defaults the fixed-candidates file to a
    ``candidates.txt`` next to the model file, forces CPU execution, and
    populates the module-level ``SHARED`` dict with opt, image loader,
    agent, and world.

    Raises:
        RuntimeError: if no model file was supplied on the command line.
    """
    parser = setup_args()
    opt = parser.parse_args(print_args=True)
    if not opt.get('model_file'):
        raise RuntimeError('Please specify a model file')
    if opt.get('fixed_cands_path') is None:
        # os.path.dirname replaces the old manual '/'-split/join dance.
        opt['fixed_cands_path'] = os.path.join(
            os.path.dirname(opt['model_file']), 'candidates.txt'
        )
    opt['task'] = 'parlai.agents.local_human.local_human:LocalHumanAgent'
    opt['image_mode'] = 'resnet152'
    # Force CPU: the serving host is not assumed to have a GPU.
    opt['no_cuda'] = True
    opt['override']['no_cuda'] = True
    SHARED['opt'] = opt
    SHARED['image_loader'] = ImageLoader(opt)
    # Create model and assign it to the specified task
    SHARED['agent'] = create_agent(opt, requireModelExists=True)
    SHARED['world'] = create_task(opt, SHARED['agent'])
def setup_interactive(shared):
    """
    Build and parse CLI opts.

    Loads the model plus a dummy second agent (required by the
    blended_skill_talk task), stores everything in ``SHARED``, and returns
    the loaded agent's options.
    """
    parser = setup_args()
    parser.add_argument('--port', type=int, default=PORT, help='Port to listen on.')
    parser.add_argument(
        '-t',
        '--task',
        type=str,
        default="blended_skill_talk",
        help='task to use',
    )
    opt = parser.parse_args(print_args=False)
    SHARED['opt'] = opt

    # Create model and assign it to the specified task.  The only use of the
    # dummy agent is to satisfy the requirement of 2 agents for the
    # blended_skill_talk task.
    agent = create_agent(opt, requireModelExists=True)
    dummy_agent = create_agent(opt, requireModelExists=True)
    SHARED['agent'] = agent
    SHARED['dummy_agent'] = dummy_agent
    SHARED['world'] = create_task(opt, [dummy_agent, agent])

    # TODO: identify why we need to call parley to change personas.
    # Load a random persona for the agent by calling world.parley.
    SHARED['world'].parley()
    print(SHARED['world'].display())

    # Show args after loading the model.
    parser.opt = agent.opt
    parser.print_args()
    return agent.opt
#!/usr/bin/env python3

# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Basic example which allows local human keyboard input to talk to a trained model.

For documentation, see parlai.scripts.interactive.
"""
from parlai.scripts.interactive import setup_args, interactive

import random


def main():
    """Seed the RNG for reproducibility and launch the interactive session."""
    random.seed(42)
    parser = setup_args()
    opt = parser.parse_args()
    interactive(opt, print_parser=parser)


if __name__ == '__main__':
    main()
def test_repeat(self):
    """Run the interactive loop with repeat_query on the ConvAI2 valid set."""
    parser = interactive.setup_args()
    opt = parser.parse_args(
        ['-m', 'repeat_query', '-t', 'convai2', '-dt', 'valid'],
        print_args=False,
    )
    interactive.interactive(opt)
def test_repeat(self):
    """Smoke-test the interactive loop with the repeat_query model."""
    parser = interactive.setup_args()
    opt = parser.parse_args(['-m', 'repeat_query'], print_args=False)
    interactive.interactive(opt)
class GrafbotAgent:
    """
    Conversational agent combining a generative ParlAI model, a semantic
    knowledge graph of personal "stories", and a polyencoder re-ranker.

    ``speak`` answers a user utterance by (1) returning a canned answer when
    the knowledge graph matches one, (2) re-ranking matched stories with the
    polyencoder and folding them into the persona before generating, or
    (3) falling back to the plain generative model.
    """

    # Built once at import time and shared by all instances: the parser is
    # only read, never mutated per-instance.
    parser = setup_args()
    # NOTE(review): the knowledge graphs are class-level, so every instance
    # shares them — learn() on one agent is visible to all.  Looks intended
    # for a one-agent-per-process server; confirm before running several.
    semkg = SemKG()
    epikg = EpiKG()
    # Class-level defaults; real values are assigned in __init__.
    opt = None
    agent = None
    world = None
    ip = ""
    polyencoder = None

    def __init__(self, personality, keywordsId, answers, ip, keywordsCond):
        # BUGFIX: ``history``/``persona_history`` used to be mutable CLASS
        # attributes, so every instance shared (and mutated) the same lists.
        self.history = []
        self.persona_history = []
        self.opt = self.parser.parse_args(print_args=False)
        self.opt['task'] = 'parlai.agents.local_human.local_human:LocalHumanAgent'
        self.agent = create_agent(self.opt, requireModelExists=True)
        self.addStoriesLive(personality)
        self.learn(personality, keywordsId, answers, keywordsCond)
        self.world = create_task(self.opt, self.agent)
        self.ip = ip
        args, self.polyencoderagent = self.initPolyEncoder(ip, personality)

    def addStoriesLive(self, personality):
        """Append new persona lines to the histories and feed them to the agent."""
        self.history += personality
        self.persona_history += personality
        personalityText = ' \n'.join(
            ["your persona: " + personaField for personaField in personality])
        print(personalityText)
        if len(personality) > 0:
            self.agent.observe({
                'episode_done': False,
                'text': personalityText
            })

    def learn(self, sentences, keywordsId, answers, keywordsCond):
        """Delegate story learning to the semantic knowledge graph."""
        self.semkg.learn(sentences, keywordsId, answers, keywordsCond)

    def initPolyEncoder(self, ip, personality):
        """
        Write the per-IP candidate file and build a polyencoder ranker agent.

        Returns the (args, agent) pair so callers can inspect the options.
        """
        # Candidate files are keyed by client IP so concurrent users do not
        # clobber each other's candidate sets.
        with open('candidates{}.txt'.format(ip), "w") as f:
            f.write(' \n'.join(personality))
        args = {
            'optimizer': 'adamax',
            'learningrate': 5e-05,
            'batchsize': 256,
            'embedding_size': 768,
            'num_epochs': 8.0,
            'model': 'transformer/polyencoder',
            'n_layers': 12,
            'n_heads': 12,
            'ffn_size': 3072,
            'gradient_clip': 0.1,
        }
        args['eval_candidates'] = 'fixed'
        args['encode_candidate_vecs'] = 'true'
        # NOTE(review): this top-level path is the un-suffixed file, but the
        # 'override' below points at the per-IP file and takes precedence.
        args['fixed_candidates_path'] = 'candidates.txt'
        args['model_file'] = 'zoo:pretrained_transformers/model_poly/model'
        args['candidates'] = 'batch'
        # HACK: absolute, machine-specific model path baked in — should come
        # from configuration.
        args['override'] = {
            'model': 'transformer/polyencoder',
            'model_file':
            '/data1/home/mrim/bentebia/anaconda3/envs/grafbot/lib/python3.7/site-packages/data/models/pretrained_transformers/model_poly/model',
            'encode_candidate_vecs': True,
            'eval_candidates': 'fixed',
            'fixed_candidates_path': 'candidates{}.txt'.format(ip)
        }
        return args, create_agent(args)

    def _generate_reply(self, english_text, user_language):
        """
        Run the generative agent on ``english_text`` and return the base
        reply payload ('text' and 'user_lang'); callers add story metadata.
        """
        self.history.append(english_text)
        print(self.history, flush=True)
        self.agent.observe({
            'episode_done': False,
            'text': english_text
        })
        model_res = self.agent.act()
        print(model_res, flush=True)
        json_return = dict()
        json_return['text'] = process_output_chatbot(model_res['text'])
        if user_language != "en":
            json_return['text'] = translate_base(json_return['text'],
                                                 dest=user_language)
        json_return['user_lang'] = user_language
        return json_return

    def speak(self, reply_text, keywordsUnlocked):
        """
        Answer ``reply_text`` and return a Flask JSON response with keys
        'text', 'user_lang', 'stories', 'score', and 'keywordsId'.
        """
        print("Reply : " + reply_text)
        user_language = detect(reply_text)
        english_version_of_user_input = translate_base(reply_text,
                                                       src=user_language)
        embedded = concatEmbeddingEn(
            getContextualEmbedding(english_version_of_user_input, verbose=False))
        entities = get_entities(english_version_of_user_input)
        stories = self.semkg.get_stories(self.epikg, [x[0] for x in entities],
                                         [embedded[0][x[1]] for x in entities],
                                         keywordsUnlocked)
        print("STORIES: ")
        print(stories)

        if len(stories) == 0:
            # No matching stories at all: plain generative reply.
            json_return = self._generate_reply(english_version_of_user_input,
                                               user_language)
            json_return['stories'] = list()
            json_return['score'] = list()
            json_return['keywordsId'] = list()
            return jsonify(json_return)

        if not stories.iloc[0].answer == '':
            # Best story carries a canned answer — return it directly.
            self.history.append(english_version_of_user_input)
            json_return = dict()
            json_return['text'] = process_output_chatbot(stories.iloc[0].answer)
            if user_language != "en":
                # BUGFIX: translate the *processed* answer; the original
                # translated the raw answer, silently discarding the
                # process_output_chatbot result (unlike every other branch).
                json_return['text'] = translate_base(json_return['text'],
                                                     dest=user_language)
            json_return['user_lang'] = user_language
            json_return['stories'] = [stories.iloc[0].sentence]
            json_return['score'] = [stories.iloc[0].distance]
            json_return['keywordsId'] = [stories.iloc[0].keywordsId]
            return jsonify(json_return)

        # Stories matched but none has a canned answer: re-rank up to three
        # with the polyencoder and fold them into the persona.
        # BUGFIX: good_stories was previously undefined (NameError) when
        # exactly one story matched.
        good_stories = []
        if len(stories) > 1:
            print("CREATE CANDIDATES", flush=True)
            for _ in range(min(3, len(stories))):
                # Rebuild the candidate file without already-picked stories.
                os.remove('candidates{}.txt'.format(self.ip))
                args, self.polyencoderagent = self.initPolyEncoder(
                    self.ip, [
                        e for e in list(stories.sentence.values)
                        if e not in good_stories
                    ])
                print("OBSERVE", flush=True)
                self.polyencoderagent.observe({
                    'episode_done':
                    False,
                    'text':
                    ' \n'.join(self.persona_history) + '\n' +
                    english_version_of_user_input
                })
                print("ACT", flush=True)
                res = self.polyencoderagent.act()
                print("PRINT ACT", flush=True)
                print(res, flush=True)
                good_stories.append(res['text'])
            self.addStoriesLive(good_stories)
        else:
            print("I don't remember anything", flush=True)

        json_return = self._generate_reply(english_version_of_user_input,
                                           user_language)
        json_return['stories'] = good_stories
        json_return['score'] = list(
            stories[stories.sentence.isin(good_stories)].distance.values)
        json_return['keywordsId'] = list()
        return jsonify(json_return)

    def get(self, val):
        """Dict-style accessor used by the serving layer ('opt'|'agent'|'world')."""
        if val == 'opt':
            return self.opt
        elif val == 'agent':
            return self.agent
        elif val == 'world':
            return self.world
        else:
            return None