def start_training(bot: str, user: str):
    """
    Prevents training of the bot if a training session is already in progress;
    otherwise starts training and returns the trained model path.
    """
    exception = None
    model_file = None
    training_status = None
    ModelProcessor.set_training_status(
        bot=bot,
        user=user,
        status=MODEL_TRAINING_STATUS.INPROGRESS.value,
    )
    try:
        model_file = train_model_for_bot(bot)
        training_status = MODEL_TRAINING_STATUS.DONE.value
    except Exception as e:
        logging.exception(e)
        training_status = MODEL_TRAINING_STATUS.FAIL.value
        exception = str(e)
        raise AppException(exception)
    finally:
        # Record the final status (and any exception) whether training succeeded or not.
        ModelProcessor.set_training_status(
            bot=bot,
            user=user,
            status=training_status,
            model_path=model_file,
            exception=exception,
        )
    # Only reached on success; on failure the AppException propagates above.
    AgentProcessor.reload(bot)
    return model_file
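# --- Illustrative usage sketch (not part of the original code) ---
# A minimal example of how start_training might be scheduled from a FastAPI
# endpoint via BackgroundTasks; the route path and wiring are assumptions made
# for illustration only.
from fastapi import APIRouter, BackgroundTasks, Depends

router = APIRouter()


@router.post("/train/background")
async def train_in_background(
    background_tasks: BackgroundTasks,
    current_user: User = Depends(auth.get_current_user),
):
    # start_training records INPROGRESS/DONE/FAIL status itself, so the
    # endpoint can return immediately and let the task run to completion.
    background_tasks.add_task(start_training, current_user.get_bot(), current_user.get_user())
    return {"message": "Model training started"}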
async def train(current_user: User = Depends(auth.get_current_user)):
    """Train the bot's model from data stored in Mongo and reload the in-memory agent."""
    model_file = await train_model_from_mongo(current_user.get_bot())
    AgentProcessor.reload(current_user.get_bot())
    return {
        "data": {"file": model_file},
        "message": "Model trained successfully",
    }
async def download_file(current_user: User = Depends(auth.get_current_user)):
    """Download latest trained model file"""
    try:
        model_path = AgentProcessor.get_latest_model(current_user.get_bot())
        return FileResponse(model_path)
    except Exception as e:
        # Raise (not return) so the failure surfaces as an error response.
        raise AppException(str(e))
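# --- Illustrative test sketch (assumptions: the handler above is mounted at
# GET /model/download on a FastAPI `app`, and `auth_header` is a hypothetical
# fixture yielding a valid Authorization header) ---
from fastapi.testclient import TestClient

client = TestClient(app)


def test_download_latest_model(auth_header):
    response = client.get("/model/download", headers=auth_header)
    # A successful download streams the model archive back to the caller.
    assert response.status_code == 200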
def test_get_agent(self, monkeypatch):
    def mongo_store(*args, **kwargs):
        return None

    monkeypatch.setattr(Utility, "get_local_mongo_store", mongo_store)
    agent = AgentProcessor.get_agent("tests")
    assert isinstance(agent, Agent)
async def chat(request_data: TextData, current_user: User = Depends(auth.get_current_user)):
    """
    Returns the bot's response for a given text query. Primarily used to
    test the bot's chat functionality.
    """
    model = AgentProcessor.get_agent(current_user.get_bot())
    response = await model.handle_text(request_data.data, sender_id=current_user.get_user())
    return {"data": {"response": response[0]["text"] if response else None}}
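# For reference, Rasa's Agent.handle_text typically resolves to a list of
# messages shaped like the example below, which is why the handler indexes
# response[0]["text"]. Exact fields can vary between Rasa versions.
example_chat_response = [
    {"recipient_id": "default", "text": "Hey! How are you?"},
]
assert (example_chat_response[0]["text"] if example_chat_response else None) == "Hey! How are you?"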
async def predict_intent(request_data: TextData, current_user: User = Depends(auth.get_current_user)):
    model = AgentProcessor.get_agent(current_user.get_bot())
    response = await model.parse_message_using_nlu_interpreter(request_data.data)
    intent = response.get("intent").get("name") if response else None
    confidence = response.get("intent").get("confidence") if response else None
    return {"data": {"intent": intent, "confidence": confidence}}
async def predict_intent(request_data: TextData, current_user: User = Depends(auth.get_current_user)):
    """
    Returns the intent predicted for the entered text by the chatbot's trained Rasa model.
    """
    model = AgentProcessor.get_agent(current_user.get_bot())
    response = await model.parse_message_using_nlu_interpreter(request_data.data)
    intent = response.get("intent").get("name") if response else None
    confidence = response.get("intent").get("confidence") if response else None
    return {"data": {"intent": intent, "confidence": confidence}}
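# Rough shape of the NLU parse result the handler above unpacks. Field names
# follow Rasa's parse output; the concrete values are illustrative only.
example_parse_result = {
    "text": "hello",
    "intent": {"name": "greet", "confidence": 0.97},
    "entities": [],
}
intent = example_parse_result.get("intent").get("name") if example_parse_result else None
confidence = example_parse_result.get("intent").get("confidence") if example_parse_result else None
assert intent == "greet" and confidence == 0.97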
async def download_model(
    background_tasks: BackgroundTasks,
    current_user: User = Depends(auth.get_current_user),
):
    """Download latest trained model file"""
    try:
        model_path = AgentProcessor.get_latest_model(current_user.get_bot())
        response = FileResponse(
            model_path,
            filename=os.path.basename(model_path),
            background=background_tasks,
        )
        response.headers["Content-Disposition"] = (
            "attachment; filename=" + os.path.basename(model_path)
        )
        return response
    except Exception as e:
        raise AppException(str(e))
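# Design note (not in the original): Starlette's FileResponse already sets a
# Content-Disposition header when `filename` is passed, so the manual header
# assignment above acts as an explicit, defensive duplicate rather than a
# functional requirement.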
def test_get_agent_from_cache_does_not_exists(self):
    # get_agent should raise for a bot that has no trained model or cache entry.
    with pytest.raises(AppException):
        AgentProcessor.get_agent("test")
def test_get_agent_from_cache(self):
    agent = AgentProcessor.get_agent("tests")
    assert isinstance(agent, Agent)
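# --- Illustrative sketch (not the project's actual API) of the caching
# behaviour these tests rely on: once a bot's agent has been loaded, get_agent
# serves later calls from an in-memory cache instead of reloading the model.
class SimpleAgentCache:
    """Minimal in-memory bot -> agent cache, for illustration only."""

    def __init__(self):
        self._agents = {}

    def set(self, bot: str, agent) -> None:
        self._agents[bot] = agent

    def get(self, bot: str):
        return self._agents.get(bot)

    def is_exists(self, bot: str) -> bool:
        return bot in self._agents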
async def chat(request_data: TextData, current_user: User = Depends(auth.get_current_user)):
    model = AgentProcessor.get_agent(current_user.get_bot())
    response = await model.handle_text(request_data.data)
    return {"data": {"response": response[0]["text"] if response else None}}