def get_evaluation(self, model, x_test, y_true, y_pred, **kwargs):
    """
    evaluate a model and return the results, falling back to the score only
    if computing the full metric report fails
    """
    try:
        # try the full evaluation first (all metrics)
        res = evaluate_model(model_type=self.model_type,
                             model=model,
                             x_test=x_test,
                             y_pred=y_pred,
                             y_true=y_true,
                             get_score_only=False,
                             **kwargs)
    except Exception as e:
        # fall back to the bare score if the full evaluation fails
        logger.exception(f"full evaluation failed, falling back to score only: {e}")
        res = evaluate_model(model_type=self.model_type,
                             model=model,
                             x_test=x_test,
                             y_pred=y_pred,
                             y_true=y_true,
                             get_score_only=True,
                             **kwargs)
    return res
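# Hypothetical usage sketch, not from the source: assuming `trainer` is an
# instance of this class and `model` is an already-fitted estimator, the call
# below returns the full metric report when possible, else just the score:
#
#     report = trainer.get_evaluation(model, x_test, y_true, y_pred)
#     logger.info(f"evaluation report: {report}")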
def evaluate(self, **kwargs):
    """
    evaluate a pre-fitted model and save the results to an evaluation.json
    @return: None
    """
    try:
        model = self._load_model()
        x_val, y_true = self._prepare_val_data()
        y_pred = model.predict(x_val)
        eval_results = evaluate_model(model_type=self.model_type,
                                      y_pred=y_pred,
                                      y_true=y_true,
                                      **kwargs)
        logger.info(f"saving evaluation results to {self.evaluation_file}")
        with open(self.evaluation_file, 'w', encoding='utf-8') as f:
            json.dump(eval_results, f, ensure_ascii=False, indent=4)
    except Exception as e:
        logger.exception(f"error occurred during evaluation: {e}")
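# Hypothetical usage sketch, not from the source: assuming `fit()` has already
# stored a model on disk, `evaluate()` reloads it, predicts on the validation
# split, and writes the metrics to `self.evaluation_file` as JSON:
#
#     trainer.evaluate()
#     with open(trainer.evaluation_file, encoding='utf-8') as f:
#         print(f.read())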
def fit(self, **kwargs):
    """
    fit a machine learning model and save it to a file along with a description.json file
    @return: None
    """
    x_train, y_train, x_test, y_test = self._prepare_fit_data()
    model_class = self._create_model()
    self.model = model_class(**kwargs)
    logger.info(f"executing a {self.model.__class__.__name__} algorithm ...")
    self.model.fit(x_train, y_train)
    saved = self._save_model(self.model)
    if saved:
        logger.info(f"model saved successfully and can be found in the {self.results_path} folder")
    # evaluate on the held-out test split and bundle everything into a description file
    test_predictions = self.model.predict(x_test)
    eval_results = evaluate_model(model_type=self.model_type,
                                  y_pred=test_predictions,
                                  y_true=y_test,
                                  **kwargs)
    fit_description = {
        "model": self.model.__class__.__name__,
        "type": self.model_props['type'],
        "algorithm": self.model_props['algorithm'],
        "data path": self.data_path,
        "train data shape": x_train.shape,
        "test data shape": x_test.shape,
        "train data size": x_train.shape[0],
        "test data size": x_test.shape[0],
        "results path": str(self.results_path),
        "model path": str(self.default_model_path),
        "target": self.target,
        "results on test data": eval_results
    }
    try:
        logger.info(f"saving fit description to {self.description_file}")
        with open(self.description_file, 'w', encoding='utf-8') as f:
            json.dump(fit_description, f, ensure_ascii=False, indent=4)
    except Exception as e:
        logger.exception(f"Error while storing the fit description file: {e}")
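# Hypothetical end-to-end sketch, names assumed rather than taken from the
# source: keyword arguments passed to `fit()` are forwarded to the model
# constructor (and on to evaluate_model), and a run produces both a saved
# model and a fit description file:
#
#     trainer.fit()  # e.g. trainer.fit(n_estimators=100) for a forest model
#     with open(trainer.description_file, encoding='utf-8') as f:
#         print(f.read())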