def run_selfchat(self):
    """
    Run self-chat for each model.
    """
    for model in self.models:
        # Free any cached GPU memory between models; ignore failures on
        # CPU-only machines.
        try:
            torch.cuda.empty_cache()
        except Exception:
            pass
        self._print_progress(f'Running self-chat for {model}')
        outfile = self._get_selfchat_log_path(model)
        if not os.path.exists(outfile):
            config = self._get_selfchat_config(model)
            with capture_output():
                parser = self_chat_setup_args()
                parser.set_params(**config)
                opt = parser.parse_args(args=[])
                self_chat(opt)
            if os.path.exists(outfile):
                self._print_progress(f'Chats saved to {outfile} for {model}')
        else:
            self._print_progress(f'Chats already exist in {outfile}, moving on...')
        self.chat_files[model] = outfile
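# A hedged sketch of the kind of dict _get_selfchat_config might return; every
# key and value below is an illustrative assumption, mirroring flags that the
# self-chat script accepts in the tests later in this section (e.g. --outfile
# and --save-format in test_convo_render).
example_selfchat_config = {
    'model_file': 'zoo:blender/blender_90M/model',  # hypothetical model file
    'outfile': 'self_chats/blender_90M',            # hypothetical log location
    'save_format': 'conversations',
}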
def test_convai2(self):
    pp = self_chat.setup_args()
    opt = pp.parse_args(
        ['-m', 'fixed_response', '--fixed-response', 'hi', '-t', 'convai2', '-dt', 'valid']
    )
    self_chat.self_chat(opt)
def _run_selfchat(self, config_id: str):
    """
    Run self-chat for a single model.

    :param config_id: id of the model in the config
    """
    self._print_progress(f'Running self-chat for {config_id}')
    config = self._get_selfchat_config(config_id)
    with capture_output():
        parser = self_chat_setup_args()
        parser.set_params(**config)
        opt = parser.parse_args(args=[])
        self_chat(opt)
def test_convo_render(self):
    """
    Test convo render by creating a self-chat, saving it to a file, and
    rendering it to HTML.
    """
    self_chat_pp = self_chat.setup_args()
    self_chat_opt = self_chat_pp.parse_args(
        [
            '-m', 'fixed_response',
            '--fixed-response', 'Hey there',
            '--save-format', 'conversations',
            '--outfile', 'self_chat_output',
        ]
    )
    self_chat.self_chat(self_chat_opt)
    convo_render_pp = convo_render.setup_args()
    convo_render_opt = convo_render_pp.parse_args(
        ['-i', 'self_chat_output.jsonl', '-o', 'self_chat_output.html']
    )
    convo_render.render_convo(convo_render_opt)
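# Note the filenames above: self_chat is given --outfile self_chat_output, and
# the 'conversations' save format appends a .jsonl suffix, so convo_render
# reads self_chat_output.jsonl. A minimal sketch of the render step on its own,
# with hypothetical paths:
render_opt = convo_render.setup_args().parse_args(
    ['-i', 'some_chat_log.jsonl', '-o', 'some_chat_log.html']  # hypothetical paths
)
convo_render.render_convo(render_opt)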
def compile_chat_logs(self):
    """
    Compile chat logs.

    Logs are generated depending on what is specified in the config for the
    model:

    1. If a `model` is provided, run self-chat for the model.
    2. If a `log_path` is provided, simply load the log path.
    3. If a `task` is provided, convert the task to ACUTE format and load that.
    """
    for model in self.models:
        # Free any cached GPU memory between models; ignore failures on
        # CPU-only machines.
        try:
            torch.cuda.empty_cache()
        except Exception:
            pass
        self._print_progress(f'Running self-chat for {model}')
        outfile = self._get_log_path(model)
        if not os.path.exists(outfile):
            if 'model' in self.model_config[model]:
                config = self._get_selfchat_config(model)
                with capture_output():
                    parser = self_chat_setup_args()
                    parser.set_params(**config)
                    opt = parser.parse_args(args=[])
                    self_chat(opt)
            elif 'task' in self.model_config[model]:
                self._convert_task_to_conversations(model)
            else:
                raise RuntimeError(
                    f'Path must exist if log_path is specified for {model}'
                )
            if os.path.exists(outfile):
                self._print_progress(f'Chats saved to {outfile} for {model}')
        else:
            self._print_progress(f'Chats already exist in {outfile}, moving on...')
        self.chat_files[model] = outfile
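# A hedged sketch of what self.model_config could look like, based on the three
# branches in compile_chat_logs above; all names and paths are illustrative
# assumptions, not taken from a real config.
example_model_config = {
    'model_a': {'model': 'zoo:blender/blender_90M/model'},  # branch 1: run self-chat
    'model_b': {'log_path': 'logs/model_b_chats.jsonl'},    # branch 2: load existing log
    'model_c': {'task': 'convai2'},                         # branch 3: convert task to ACUTE format
}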
def test_vanilla(self):
    pp = self_chat.setup_args()
    opt = pp.parse_args(['-m', 'fixed_response', '--fixed-response', 'hi'])
    self_chat.self_chat(opt)
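# fixed_response is ParlAI's trivial agent that always replies with the string
# given via --fixed-response, so the tests above can exercise self_chat without
# downloading or running a real model.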